[kernel] r8364 - in dists/trunk/linux-2.6/debian: . patches/features/all/xen patches/series

Bastian Blank waldi at alioth.debian.org
Fri Mar 16 19:32:49 UTC 2007


Author: waldi
Date: Fri Mar 16 19:32:46 2007
New Revision: 8364

Added:
   dists/trunk/linux-2.6/debian/patches/features/all/xen/fedora-2.6.20-48670.patch
      - copied, changed from r8229, dists/trunk/linux-2.6/debian/patches/features/all/xen/fedora-2.6.18-36186.patch
   dists/trunk/linux-2.6/debian/patches/features/all/xen/update.patch   (contents, props changed)
Removed:
   dists/trunk/linux-2.6/debian/patches/features/all/xen/fedora-2.6.18-36186.patch
Modified:
   dists/trunk/linux-2.6/debian/changelog
   dists/trunk/linux-2.6/debian/patches/series/1~experimental.1-extra
Log:
* Update the Xen patch to changeset 48670 from the Fedora 2.6.20 branch.
* Fix the problems identified with this patch.

* debian/changelog: Update.
* debian/patches/features/all/xen/fedora-2.6.18-36186.patch: Remove.
* debian/patches/features/all/xen/fedora-2.6.20-48670.patch,
  debian/patches/features/all/xen/update.patch: Add.
* debian/patches/series/1~experimental.1-extra: Update (see the sketch below).

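For context, the series file named above tells the Debian kernel build
which extra patches to apply on top of the base series. A hypothetical
sketch of the updated entries (illustrative only; the real file also
carries per-flavour annotations that are not reproduced here):

    + features/all/xen/fedora-2.6.20-48670.patch
    + features/all/xen/update.patch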

Modified: dists/trunk/linux-2.6/debian/changelog
==============================================================================
--- dists/trunk/linux-2.6/debian/changelog	(original)
+++ dists/trunk/linux-2.6/debian/changelog	Fri Mar 16 19:32:46 2007
@@ -125,6 +125,7 @@
     - rtc-pcf8563: detect polarity of century bit automatically
     - x86_64: fix 2.6.18 regression - PTRACE_OLDSETOPTIONS should be accepted
     - ocfs2: ocfs2_link() journal credits update
+  * Update xen patch to changeset 48670 from fedora 2.6.20 branch.
 
   [ Rod Whitby ]
   * arm/ixp4xx: Enable PATA_ARTOP for the nas100d and dsmg600.
@@ -163,7 +164,7 @@
   [ Frederik Schüler ]
   * Disable NAPI on forcedeth, it is broken.
 
- -- Bastian Blank <waldi at debian.org>  Thu, 15 Mar 2007 14:28:09 +0100
+ -- Bastian Blank <waldi at debian.org>  Fri, 16 Mar 2007 19:24:10 +0100
 
 linux-2.6 (2.6.18.dfsg.1-10) unstable; urgency=low
 

Copied: dists/trunk/linux-2.6/debian/patches/features/all/xen/fedora-2.6.20-48670.patch (from r8229, dists/trunk/linux-2.6/debian/patches/features/all/xen/fedora-2.6.18-36186.patch)
==============================================================================
--- dists/trunk/linux-2.6/debian/patches/features/all/xen/fedora-2.6.18-36186.patch	(original)
+++ dists/trunk/linux-2.6/debian/patches/features/all/xen/fedora-2.6.20-48670.patch	Fri Mar 16 19:32:46 2007
@@ -1,6 +1,6 @@
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/boot-xen/Makefile linux-2.6.18-xen/arch/i386/boot-xen/Makefile
---- linux-2.6.18.3/arch/i386/boot-xen/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/boot-xen/Makefile	2006-11-19 14:26:21.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/boot-xen/Makefile b/arch/i386/boot-xen/Makefile
+--- a/arch/i386/boot-xen/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/boot-xen/Makefile	2007-03-14 10:55:14.000000000 +0100
 @@ -0,0 +1,21 @@
 +
 +OBJCOPYFLAGS := -g --strip-unneeded
@@ -23,9 +23,9 @@
 +	install -m0664 .config $(INSTALL_ROOT)/boot/config-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
 +	install -m0664 System.map $(INSTALL_ROOT)/boot/System.map-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
 +	ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_ROOT)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/Kconfig linux-2.6.18-xen/arch/i386/Kconfig
---- linux-2.6.18.3/arch/i386/Kconfig	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/Kconfig	2006-12-05 18:42:36.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/Kconfig b/arch/i386/Kconfig
+--- a/arch/i386/Kconfig	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/Kconfig	2007-03-14 10:55:14.000000000 +0100
 @@ -16,6 +16,7 @@
  
  config GENERIC_TIME
@@ -34,7 +34,7 @@
  	default y
  
  config LOCKDEP_SUPPORT
-@@ -103,6 +104,15 @@
+@@ -108,6 +109,15 @@
  	help
  	  Choose this option if your computer is a standard PC or compatible.
  
@@ -50,7 +50,7 @@
  config X86_ELAN
  	bool "AMD Elan"
  	help
-@@ -213,6 +223,7 @@
+@@ -229,6 +239,7 @@
  
  config HPET_TIMER
  	bool "HPET Timer Support"
@@ -58,31 +58,31 @@
  	help
  	  This enables the use of the HPET for the kernel's internal timer.
  	  HPET is the next generation timer replacing legacy 8254s.
-@@ -263,7 +274,7 @@
+@@ -279,7 +290,7 @@
  
  config X86_UP_APIC
  	bool "Local APIC support on uniprocessors"
--	depends on !SMP && !(X86_VISWS || X86_VOYAGER)
-+	depends on !SMP && !(X86_VISWS || X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)
+-	depends on !SMP && !(X86_VISWS || X86_VOYAGER || X86_GENERICARCH)
++	depends on !SMP && !(X86_VISWS || X86_VOYAGER || X86_GENERICARCH || XEN_UNPRIVILEGED_GUEST)
  	help
  	  A local APIC (Advanced Programmable Interrupt Controller) is an
  	  integrated interrupt controller in the CPU. If you have a single-CPU
-@@ -288,12 +299,12 @@
+@@ -304,12 +315,12 @@
  
  config X86_LOCAL_APIC
  	bool
--	depends on X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER)
-+	depends on X86_UP_APIC || ((X86_VISWS || SMP) && !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST))
+-	depends on X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER) || X86_GENERICARCH
++	depends on X86_UP_APIC || ((X86_VISWS || SMP) && !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)) || X86_GENERICARCH
  	default y
  
  config X86_IO_APIC
  	bool
--	depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER))
-+	depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER || XEN_UNPRIVILEGED_GUEST))
+-	depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER)) || X86_GENERICARCH
++	depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)) || X86_GENERICARCH
  	default y
  
  config X86_VISWS_APIC
-@@ -303,7 +314,7 @@
+@@ -319,7 +330,7 @@
  
  config X86_MCE
  	bool "Machine Check Exception"
@@ -91,15 +91,15 @@
  	---help---
  	  Machine Check Exception support allows the processor to notify the
  	  kernel if it detects a problem (e.g. overheating, component failure).
-@@ -402,6 +413,7 @@
+@@ -418,6 +429,7 @@
  
  config MICROCODE
  	tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
 +	depends on !XEN_UNPRIVILEGED_GUEST
+ 	select FW_LOADER
  	---help---
  	  If you say Y here and also to "/dev file system support" in the
- 	  'File systems' section, you will be able to update the microcode on
-@@ -419,6 +431,7 @@
+@@ -441,6 +453,7 @@
  
  config X86_MSR
  	tristate "/dev/cpu/*/msr - Model-specific register support"
@@ -107,7 +107,7 @@
  	help
  	  This device gives privileged processes access to the x86
  	  Model-Specific Registers (MSRs).  It is a character device with
-@@ -434,6 +447,10 @@
+@@ -456,6 +469,10 @@
  	  with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
  	  /dev/cpu/31/cpuid.
  
@@ -118,7 +118,7 @@
  source "drivers/firmware/Kconfig"
  
  choice
-@@ -607,7 +624,7 @@
+@@ -629,7 +646,7 @@
  
  config HIGHPTE
  	bool "Allocate 3rd-level pagetables from highmem"
@@ -127,7 +127,7 @@
  	help
  	  The VM uses one page table entry for each page of physical memory.
  	  For systems with a lot of RAM, this can be wasteful of precious
-@@ -616,6 +633,7 @@
+@@ -638,6 +655,7 @@
  
  config MATH_EMULATION
  	bool "Math emulation"
@@ -135,7 +135,7 @@
  	---help---
  	  Linux can emulate a math coprocessor (used for floating point
  	  operations) if you don't have one. 486DX and Pentium processors have
-@@ -641,6 +659,8 @@
+@@ -663,6 +681,8 @@
  
  config MTRR
  	bool "MTRR (Memory Type Range Register) support"
@@ -144,7 +144,7 @@
  	---help---
  	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
  	  the Memory Type Range Registers (MTRRs) may be used to control
-@@ -675,7 +695,7 @@
+@@ -697,7 +717,7 @@
  
  config EFI
  	bool "Boot from EFI support"
@@ -152,8 +152,8 @@
 +	depends on ACPI && !X86_XEN
  	default n
  	---help---
- 	This enables the the kernel to boot on EFI platforms using
-@@ -693,7 +713,7 @@
+ 	This enables the kernel to boot on EFI platforms using
+@@ -715,7 +735,7 @@
  
  config IRQBALANCE
   	bool "Enable kernel irq balancing"
@@ -162,24 +162,24 @@
  	default y
  	help
   	  The default yes will allow the kernel to do irq load balancing.
-@@ -741,7 +761,7 @@
+@@ -749,6 +769,7 @@
  
  config KEXEC
- 	bool "kexec system call (EXPERIMENTAL)"
--	depends on EXPERIMENTAL
-+	depends on EXPERIMENTAL && !X86_XEN
+ 	bool "kexec system call"
++	depends on !X86_XEN
  	help
  	  kexec is a system call that implements the ability to shutdown your
  	  current kernel, and to start another kernel.  It is like a reboot
-@@ -794,6 +814,7 @@
+@@ -865,7 +886,7 @@
  config COMPAT_VDSO
  	bool "Compat VDSO support"
  	default y
+-	depends on !PARAVIRT
 +	depends on !X86_XEN
  	help
  	  Map the VDSO to the predictable old-style address too.
  	---help---
-@@ -810,18 +831,20 @@
+@@ -882,18 +903,20 @@
  	depends on HIGHMEM
  
  menu "Power management options (ACPI, APM)"
@@ -203,7 +203,7 @@
  	---help---
  	  APM is a BIOS specification for saving power using several different
  	  techniques. This is mostly useful for battery powered laptops with
-@@ -1006,6 +1029,7 @@
+@@ -1078,6 +1101,7 @@
  
  config PCI_GOBIOS
  	bool "BIOS"
@@ -211,7 +211,7 @@
  
  config PCI_GOMMCONFIG
  	bool "MMConfig"
-@@ -1013,6 +1037,13 @@
+@@ -1085,6 +1109,13 @@
  config PCI_GODIRECT
  	bool "Direct"
  
@@ -225,7 +225,7 @@
  config PCI_GOANY
  	bool "Any"
  
-@@ -1020,7 +1051,7 @@
+@@ -1092,7 +1123,7 @@
  
  config PCI_BIOS
  	bool
@@ -234,7 +234,7 @@
  	default y
  
  config PCI_DIRECT
-@@ -1033,6 +1064,18 @@
+@@ -1105,6 +1136,18 @@
  	depends on PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
  	default y
  
@@ -253,7 +253,7 @@
  source "drivers/pci/pcie/Kconfig"
  
  source "drivers/pci/Kconfig"
-@@ -1043,7 +1086,7 @@
+@@ -1115,7 +1158,7 @@
  
  config ISA
  	bool "ISA support"
@@ -262,7 +262,7 @@
  	help
  	  Find out whether you have ISA slots on your motherboard.  ISA is the
  	  name of a bus system, i.e. the way the CPU talks to the other stuff
-@@ -1070,7 +1113,7 @@
+@@ -1142,7 +1185,7 @@
  source "drivers/eisa/Kconfig"
  
  config MCA
@@ -271,7 +271,7 @@
  	default y if X86_VOYAGER
  	help
  	  MicroChannel Architecture is found in some IBM PS/2 machines and
-@@ -1146,6 +1189,8 @@
+@@ -1218,6 +1261,8 @@
  
  source "crypto/Kconfig"
  
@@ -280,7 +280,7 @@
  source "lib/Kconfig"
  
  #
-@@ -1171,7 +1216,7 @@
+@@ -1243,7 +1288,7 @@
  
  config X86_HT
  	bool
@@ -289,7 +289,7 @@
  	default y
  
  config X86_BIOS_REBOOT
-@@ -1184,6 +1229,16 @@
+@@ -1256,6 +1301,16 @@
  	depends on X86_SMP || (X86_VOYAGER && SMP)
  	default y
  
@@ -306,10 +306,10 @@
  config KTIME_SCALAR
  	bool
  	default y
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/Kconfig.cpu linux-2.6.18-xen/arch/i386/Kconfig.cpu
---- linux-2.6.18.3/arch/i386/Kconfig.cpu	2006-12-06 09:06:08.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/Kconfig.cpu	2006-12-05 18:42:36.000000000 +0100
-@@ -252,7 +252,7 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
+--- a/arch/i386/Kconfig.cpu	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/Kconfig.cpu	2007-03-14 10:55:14.000000000 +0100
+@@ -267,7 +267,7 @@
  
  config X86_F00F_BUG
  	bool
@@ -318,16 +318,16 @@
  	default y
  
  config X86_WP_WORKS_OK
-@@ -312,5 +312,5 @@
+@@ -327,5 +327,5 @@
  
  config X86_TSC
  	bool
--	depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX) && !X86_NUMAQ
-+	depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX) && !X86_NUMAQ && !X86_XEN
+-	depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ
++	depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ && !X86_XEN
  	default y
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/Kconfig.debug linux-2.6.18-xen/arch/i386/Kconfig.debug
---- linux-2.6.18.3/arch/i386/Kconfig.debug	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/Kconfig.debug	2006-12-05 18:42:36.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
+--- a/arch/i386/Kconfig.debug	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/Kconfig.debug	2007-03-14 10:55:14.000000000 +0100
 @@ -79,6 +79,7 @@
  config DOUBLEFAULT
  	default y
@@ -336,3092 +336,2436 @@
  	help
            This option allows trapping of rare doublefault exceptions that
            would otherwise cause a system to silently reboot. Disabling this
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/acpi/boot-xen.c linux-2.6.18-xen/arch/i386/kernel/acpi/boot-xen.c
---- linux-2.6.18.3/arch/i386/kernel/acpi/boot-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/acpi/boot-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,1168 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
+--- a/arch/i386/kernel/acpi/boot.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/acpi/boot.c	2007-03-14 10:55:14.000000000 +0100
+@@ -107,7 +107,7 @@
+  */
+ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
+ 
+-#ifdef	CONFIG_X86_64
++#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
+ 
+ /* rely on all ACPI tables being in the direct mapping */
+ char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
+@@ -140,8 +140,10 @@
+ 	unsigned long base, offset, mapped_size;
+ 	int idx;
+ 
++#ifndef CONFIG_XEN
+ 	if (phys + size < 8 * 1024 * 1024)
+ 		return __va(phys);
++#endif
+ 
+ 	offset = phys & (PAGE_SIZE - 1);
+ 	mapped_size = PAGE_SIZE - offset;
+@@ -611,7 +613,11 @@
+ 	 * RSDP signature.
+ 	 */
+ 	for (offset = 0; offset < length; offset += 16) {
++#ifdef CONFIG_XEN
++		if (strncmp((char *)((unsigned long)isa_bus_to_virt(start) + offset), "RSD PTR ", sig_len))
++#else
+ 		if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
++#endif
+ 			continue;
+ 		return (start + offset);
+ 	}
+@@ -724,7 +730,7 @@
+ 	acpi_fadt.force_apic_physical_destination_mode =
+ 	    fadt->force_apic_physical_destination_mode;
+ 
+-#ifdef CONFIG_X86_PM_TIMER
++#if defined(CONFIG_X86_PM_TIMER) && !defined(CONFIG_XEN)
+ 	/* detect the location of the ACPI PM Timer */
+ 	if (fadt->revision >= FADT2_REVISION_ID) {
+ 		/* FADT rev. 2 */
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/acpi/Makefile b/arch/i386/kernel/acpi/Makefile
+--- a/arch/i386/kernel/acpi/Makefile	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/acpi/Makefile	2007-03-14 10:55:14.000000000 +0100
+@@ -8,3 +8,7 @@
+ obj-y				+= cstate.o processor.o
+ endif
+ 
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++obj-y := $(call cherrypickxen, $(obj-y), $(src))
++endif
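
Note on the hunk above: cherrypickxen is defined in scripts/Makefile.xen,
which is not part of this excerpt. Its job is to substitute a foo-xen.o
for foo.o whenever a Xen-specific source file exists next to the
Makefile. A rough, hypothetical sketch of such a helper in GNU make
(not the actual scripts/Makefile.xen):

    # For each object in $(1), prefer the -xen variant when a matching
    # source file exists in the source directory $(2).
    cherrypickxen = $(foreach obj,$(1),\
        $(if $(wildcard $(2)/$(basename $(obj))-xen.c),\
            $(basename $(obj))-xen.o,$(obj)))

So $(call cherrypickxen, $(obj-y), $(src)) rewrites the object list at
parse time; the -xen objects are built from the *-xen.c files this
patch adds.
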
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
+--- a/arch/i386/kernel/alternative.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/alternative.c	2007-03-14 10:55:14.000000000 +0100
+@@ -5,7 +5,11 @@
+ #include <asm/alternative.h>
+ #include <asm/sections.h>
+ 
++#ifdef CONFIG_X86_64_XEN
++static int no_replacement    = 1;
++#else
+ static int no_replacement    = 0;
++#endif
+ static int smp_alt_once      = 0;
+ static int debug_alternative = 0;
+ 
+@@ -165,7 +169,11 @@
+ #ifdef CONFIG_X86_64
+ 		/* vsyscall code is not mapped yet. resolve it manually. */
+ 		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
++#ifdef CONFIG_XEN
++			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)phys_to_machine(__pa_symbol(&__vsyscall_0)));
++#else
+ 			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
++#endif
+ 			DPRINTK("%s: vsyscall fixup: %p => %p\n",
+ 				__FUNCTION__, a->instr, instr);
+ 		}
+@@ -388,9 +396,12 @@
+ 	unsigned long flags;
+ 	if (no_replacement) {
+ 		printk(KERN_INFO "(SMP-)alternatives turned off\n");
++#ifndef CONFIG_X86_64
++/* ToDo: x86_64 put something strange there, not sure what yet */
+ 		free_init_pages("SMP alternatives",
+ 				(unsigned long)__smp_alt_begin,
+ 				(unsigned long)__smp_alt_end);
++#endif
+ 		return;
+ 	}
+ 
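
Note on the alternative.c hunk above: the vsyscall fixup goes through
phys_to_machine() because a Xen guest's "physical" addresses are only
pseudo-physical; each page frame must be translated to a machine frame
through a table the hypervisor hands to the guest. Roughly, following
the shape of the Xen tree's maddr.h (quoted from memory, not from this
patch):

    /* Sketch: pseudo-physical -> machine address translation.
     * pfn_to_mfn() indexes the phys_to_machine_mapping table set up
     * by the domain builder. */
    static inline unsigned long phys_to_machine(unsigned long phys)
    {
            unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
            return (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
    }

This is why a bare __pa_symbol() result cannot be used there under
CONFIG_XEN.
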
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/apic-xen.c b/arch/i386/kernel/apic-xen.c
+--- a/arch/i386/kernel/apic-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/apic-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,223 @@
 +/*
-+ *  boot.c - Architecture-Specific Low-Level ACPI Boot Support
-+ *
-+ *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh at intel.com>
-+ *  Copyright (C) 2001 Jun Nakajima <jun.nakajima at intel.com>
-+ *
-+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *	Local APIC handling, local APIC timers
 + *
-+ *  This program is free software; you can redistribute it and/or modify
-+ *  it under the terms of the GNU General Public License as published by
-+ *  the Free Software Foundation; either version 2 of the License, or
-+ *  (at your option) any later version.
-+ *
-+ *  This program is distributed in the hope that it will be useful,
-+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *  GNU General Public License for more details.
-+ *
-+ *  You should have received a copy of the GNU General Public License
-+ *  along with this program; if not, write to the Free Software
-+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *	(c) 1999, 2000 Ingo Molnar <mingo at redhat.com>
 + *
-+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *	Fixes
++ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
++ *					thanks to Eric Gilmore
++ *					and Rolf G. Tews
++ *					for testing these extensively.
++ *	Maciej W. Rozycki	:	Various updates and fixes.
++ *	Mikael Pettersson	:	Power Management for UP-APIC.
++ *	Pavel Machek and
++ *	Mikael Pettersson	:	PM converted to driver model.
 + */
 +
 +#include <linux/init.h>
-+#include <linux/acpi.h>
-+#include <linux/efi.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/mc146818rtc.h>
++#include <linux/kernel_stat.h>
++#include <linux/sysdev.h>
++#include <linux/cpu.h>
 +#include <linux/module.h>
-+#include <linux/dmi.h>
-+#include <linux/irq.h>
 +
-+#include <asm/pgtable.h>
-+#include <asm/io_apic.h>
-+#include <asm/apic.h>
-+#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/smp.h>
++#include <asm/mtrr.h>
 +#include <asm/mpspec.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/hpet.h>
++#include <asm/i8253.h>
++#include <asm/nmi.h>
 +
-+#ifdef	CONFIG_X86_64
-+
-+extern void __init clustered_apic_check(void);
-+
-+extern int gsi_irq_sharing(int gsi);
-+#include <asm/proto.h>
-+
-+static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
-+
-+
-+#else				/* X86 */
-+
-+#ifdef	CONFIG_X86_LOCAL_APIC
 +#include <mach_apic.h>
-+#include <mach_mpparse.h>
-+#endif				/* CONFIG_X86_LOCAL_APIC */
-+
-+static inline int gsi_irq_sharing(int gsi) { return gsi; }
-+
-+#endif				/* X86 */
-+
-+#define BAD_MADT_ENTRY(entry, end) (					    \
-+		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
-+		((acpi_table_entry_header *)entry)->length < sizeof(*entry))
-+
-+#define PREFIX			"ACPI: "
-+
-+int acpi_noirq __initdata;	/* skip ACPI IRQ initialization */
-+int acpi_pci_disabled __initdata;	/* skip ACPI PCI scan and IRQ initialization */
-+int acpi_ht __initdata = 1;	/* enable HT */
-+
-+int acpi_lapic;
-+int acpi_ioapic;
-+int acpi_strict;
-+EXPORT_SYMBOL(acpi_strict);
-+
-+acpi_interrupt_flags acpi_sci_flags __initdata;
-+int acpi_sci_override_gsi __initdata;
-+int acpi_skip_timer_override __initdata;
++#include <mach_apicdef.h>
++#include <mach_ipi.h>
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
-+#endif
++#include "io_ports.h"
 +
-+#ifndef __HAVE_ARCH_CMPXCHG
-+#warning ACPI uses CMPXCHG, i486 and later hardware
++#ifndef CONFIG_XEN
++/*
++ * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
++ * IPIs in place of local APIC timers
++ */
++static cpumask_t timer_bcast_ipi;
 +#endif
 +
-+#define MAX_MADT_ENTRIES	256
-+u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
-+    {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
-+EXPORT_SYMBOL(x86_acpiid_to_apicid);
-+
-+/* --------------------------------------------------------------------------
-+                              Boot-time Configuration
-+   -------------------------------------------------------------------------- */
-+
 +/*
-+ * The default interrupt routing model is PIC (8259).  This gets
-+ * overriden if IOAPICs are enumerated (below).
++ * Knob to control our willingness to enable the local APIC.
 + */
-+enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
-+
-+#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
++static int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
 +
-+/* rely on all ACPI tables being in the direct mapping */
-+char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
++static inline void lapic_disable(void)
 +{
-+	if (!phys_addr || !size)
-+		return NULL;
-+
-+	if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
-+		return __va(phys_addr);
-+
-+	return NULL;
++	enable_local_apic = -1;
++	clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
 +}
 +
-+#else
++static inline void lapic_enable(void)
++{
++	enable_local_apic = 1;
++}
 +
 +/*
-+ * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
-+ * to map the target physical address. The problem is that set_fixmap()
-+ * provides a single page, and it is possible that the page is not
-+ * sufficient.
-+ * By using this area, we can map up to MAX_IO_APICS pages temporarily,
-+ * i.e. until the next __va_range() call.
-+ *
-+ * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
-+ * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
-+ * count idx down while incrementing the phys address.
++ * Debug level
 + */
-+char *__acpi_map_table(unsigned long phys, unsigned long size)
-+{
-+	unsigned long base, offset, mapped_size;
-+	int idx;
++int apic_verbosity;
 +
++static int modern_apic(void)
++{
 +#ifndef CONFIG_XEN
-+	if (phys + size < 8 * 1024 * 1024)
-+		return __va(phys);
++	unsigned int lvr, version;
++	/* AMD systems use old APIC versions, so check the CPU */
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++		boot_cpu_data.x86 >= 0xf)
++		return 1;
++	lvr = apic_read(APIC_LVR);
++	version = GET_APIC_VERSION(lvr);
++	return version >= 0x14;
++#else
++	return 1;
 +#endif
++}
 +
-+	offset = phys & (PAGE_SIZE - 1);
-+	mapped_size = PAGE_SIZE - offset;
-+	set_fixmap(FIX_ACPI_END, phys);
-+	base = fix_to_virt(FIX_ACPI_END);
-+
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this themselves.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++	printk("unexpected IRQ trap at vector %02x\n", irq);
 +	/*
-+	 * Most cases can be covered by the below.
++	 * Currently unexpected vectors happen only on SMP and APIC.
++	 * We _must_ ack these because every local APIC has only N
++	 * irq slots per priority level, and a 'hanging, unacked' IRQ
++	 * holds up an irq slot - in excessive cases (when multiple
++	 * unexpected vectors occur) that might lock up the APIC
++	 * completely.
++	 * But only ack when the APIC is enabled -AK
 +	 */
-+	idx = FIX_ACPI_END;
-+	while (mapped_size < size) {
-+		if (--idx < FIX_ACPI_BEGIN)
-+			return NULL;	/* cannot handle this */
-+		phys += PAGE_SIZE;
-+		set_fixmap(idx, phys);
-+		mapped_size += PAGE_SIZE;
-+	}
-+
-+	return ((unsigned char *)base + offset);
++	if (cpu_has_apic)
++		ack_APIC_irq();
 +}
-+#endif
-+
-+#ifdef CONFIG_PCI_MMCONFIG
-+/* The physical address of the MMCONFIG aperture.  Set from ACPI tables. */
-+struct acpi_table_mcfg_config *pci_mmcfg_config;
-+int pci_mmcfg_config_num;
 +
-+int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
++#ifndef CONFIG_XEN
++void __init apic_intr_init(void)
 +{
-+	struct acpi_table_mcfg *mcfg;
-+	unsigned long i;
-+	int config_size;
++#ifdef CONFIG_SMP
++	smp_intr_init();
++#endif
++	/* self generated IPI for local APIC timer */
++	set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 +
-+	if (!phys_addr || !size)
-+		return -EINVAL;
++	/* IPI vectors for APIC spurious and error interrupts */
++	set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
++	set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
 +
-+	mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
-+	if (!mcfg) {
-+		printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
-+		return -ENODEV;
-+	}
++	/* thermal monitor LVT interrupt */
++#ifdef CONFIG_X86_MCE_P4THERMAL
++	set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
++#endif
++}
 +
-+	/* how many config structures do we have */
-+	pci_mmcfg_config_num = 0;
-+	i = size - sizeof(struct acpi_table_mcfg);
-+	while (i >= sizeof(struct acpi_table_mcfg_config)) {
-+		++pci_mmcfg_config_num;
-+		i -= sizeof(struct acpi_table_mcfg_config);
-+	};
-+	if (pci_mmcfg_config_num == 0) {
-+		printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
-+		return -ENODEV;
-+	}
++/* Using APIC to generate smp_local_timer_interrupt? */
++int using_apic_timer __read_mostly = 0;
 +
-+	config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
-+	pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
-+	if (!pci_mmcfg_config) {
-+		printk(KERN_WARNING PREFIX
-+		       "No memory for MCFG config tables\n");
-+		return -ENOMEM;
-+	}
++static int enabled_via_apicbase;
 +
-+	memcpy(pci_mmcfg_config, &mcfg->config, config_size);
-+	for (i = 0; i < pci_mmcfg_config_num; ++i) {
-+		if (mcfg->config[i].base_reserved) {
-+			printk(KERN_ERR PREFIX
-+			       "MMCONFIG not in low 4GB of memory\n");
-+			kfree(pci_mmcfg_config);
-+			pci_mmcfg_config_num = 0;
-+			return -ENODEV;
-+		}
-+	}
++void enable_NMI_through_LVT0 (void * dummy)
++{
++	unsigned int v, ver;
 +
-+	return 0;
++	ver = apic_read(APIC_LVR);
++	ver = GET_APIC_VERSION(ver);
++	v = APIC_DM_NMI;			/* unmask and set to NMI */
++	if (!APIC_INTEGRATED(ver))		/* 82489DX */
++		v |= APIC_LVT_LEVEL_TRIGGER;
++	apic_write_around(APIC_LVT0, v);
 +}
-+#endif				/* CONFIG_PCI_MMCONFIG */
++#endif /* !CONFIG_XEN */
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
++int get_physical_broadcast(void)
 +{
-+	struct acpi_table_madt *madt = NULL;
-+
-+	if (!phys_addr || !size || !cpu_has_apic)
-+		return -EINVAL;
-+
-+	madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
-+	if (!madt) {
-+		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
-+		return -ENODEV;
-+	}
-+
-+	if (madt->lapic_address) {
-+		acpi_lapic_addr = (u64) madt->lapic_address;
-+
-+		printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
-+		       madt->lapic_address);
-+	}
-+
-+	acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
-+
-+	return 0;
++	if (modern_apic())
++		return 0xff;
++	else
++		return 0xf;
 +}
 +
-+static int __init
-+acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
++#ifndef CONFIG_XEN
++#ifndef CONFIG_SMP
++static void up_apic_timer_interrupt_call(void)
 +{
-+	struct acpi_table_lapic *processor = NULL;
-+
-+	processor = (struct acpi_table_lapic *)header;
-+
-+	if (BAD_MADT_ENTRY(processor, end))
-+		return -EINVAL;
-+
-+	acpi_table_print_madt_entry(header);
-+
-+	/* Record local apic id only when enabled */
-+	if (processor->flags.enabled)
-+		x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
++	int cpu = smp_processor_id();
 +
 +	/*
-+	 * We need to register disabled CPU as well to permit
-+	 * counting disabled CPUs. This allows us to size
-+	 * cpus_possible_map more accurately, to permit
-+	 * to not preallocating memory for all NR_CPUS
-+	 * when we use CPU hotplug.
++	 * the NMI deadlock-detector uses this.
 +	 */
-+	mp_register_lapic(processor->id,	/* APIC ID */
-+			  processor->flags.enabled);	/* Enabled? */
++	per_cpu(irq_stat, cpu).apic_timer_irqs++;
 +
-+	return 0;
++	smp_local_timer_interrupt();
 +}
++#endif
 +
-+static int __init
-+acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
-+			  const unsigned long end)
++void smp_send_timer_broadcast_ipi(void)
 +{
-+	struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
-+
-+	lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
-+
-+	if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
-+		return -EINVAL;
-+
-+	acpi_lapic_addr = lapic_addr_ovr->address;
++	cpumask_t mask;
 +
-+	return 0;
++	cpus_and(mask, cpu_online_map, timer_bcast_ipi);
++	if (!cpus_empty(mask)) {
++#ifdef CONFIG_SMP
++		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
++#else
++		/*
++		 * We can directly call the apic timer interrupt handler
++		 * in UP case. Minus all irq related functions
++		 */
++		up_apic_timer_interrupt_call();
++#endif
++	}
 +}
++#endif
 +
-+static int __init
-+acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
++int setup_profiling_timer(unsigned int multiplier)
 +{
-+	struct acpi_table_lapic_nmi *lapic_nmi = NULL;
-+
-+	lapic_nmi = (struct acpi_table_lapic_nmi *)header;
-+
-+	if (BAD_MADT_ENTRY(lapic_nmi, end))
-+		return -EINVAL;
-+
-+	acpi_table_print_madt_entry(header);
++	return -EINVAL;
++}
 +
-+	if (lapic_nmi->lint != 1)
-+		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor (void)
++{
++#ifdef CONFIG_X86_IO_APIC
++	if (smp_found_config)
++		if (!skip_ioapic_setup && nr_ioapics)
++			setup_IO_APIC();
++#endif
 +
 +	return 0;
 +}
 +
-+#endif				/*CONFIG_X86_LOCAL_APIC */
-+
-+#ifdef CONFIG_X86_IO_APIC
-+
-+static int __init
-+acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
++static int __init parse_lapic(char *arg)
 +{
-+	struct acpi_table_ioapic *ioapic = NULL;
-+
-+	ioapic = (struct acpi_table_ioapic *)header;
-+
-+	if (BAD_MADT_ENTRY(ioapic, end))
-+		return -EINVAL;
-+
-+	acpi_table_print_madt_entry(header);
-+
-+	mp_register_ioapic(ioapic->id,
-+			   ioapic->address, ioapic->global_irq_base);
-+
++	lapic_enable();
 +	return 0;
 +}
++early_param("lapic", parse_lapic);
 +
-+/*
-+ * Parse Interrupt Source Override for the ACPI SCI
-+ */
-+static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
++static int __init parse_nolapic(char *arg)
 +{
-+	if (trigger == 0)	/* compatible SCI trigger is level */
-+		trigger = 3;
-+
-+	if (polarity == 0)	/* compatible SCI polarity is low */
-+		polarity = 3;
++	lapic_disable();
++	return 0;
++}
++early_param("nolapic", parse_nolapic);
 +
-+	/* Command-line over-ride via acpi_sci= */
-+	if (acpi_sci_flags.trigger)
-+		trigger = acpi_sci_flags.trigger;
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
+--- a/arch/i386/kernel/asm-offsets.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/asm-offsets.c	2007-03-14 10:55:14.000000000 +0100
+@@ -89,9 +89,14 @@
+ 	OFFSET(pbe_orig_address, pbe, orig_address);
+ 	OFFSET(pbe_next, pbe, next);
+ 
++#ifndef CONFIG_X86_NO_TSS
+ 	/* Offset from the sysenter stack to tss.esp0 */
+-	DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, esp0) -
++	DEFINE(SYSENTER_stack_esp0, offsetof(struct tss_struct, esp0) -
+ 		 sizeof(struct tss_struct));
++#else
++	/* sysenter stack points directly to esp0 */
++	DEFINE(SYSENTER_stack_esp0, 0);
++#endif
+ 
+ 	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
+ 	DEFINE(VDSO_PRELINK, VDSO_PRELINK);
+@@ -111,4 +116,10 @@
+ 	OFFSET(PARAVIRT_iret, paravirt_ops, iret);
+ 	OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
+ #endif
 +
-+	if (acpi_sci_flags.polarity)
-+		polarity = acpi_sci_flags.polarity;
 +
-+	/*
-+	 * mp_config_acpi_legacy_irqs() already setup IRQs < 16
-+	 * If GSI is < 16, this will update its flags,
-+	 * else it will create a new mp_irqs[] entry.
-+	 */
-+	mp_override_legacy_irq(gsi, polarity, trigger, gsi);
++#ifdef CONFIG_XEN
++	BLANK();
++	OFFSET(XEN_START_mfn_list, start_info, mfn_list);
++#endif
+ }
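
Note on the asm-offsets.c hunk above: this file exists solely to export
C structure offsets to assembly. Each DEFINE()/OFFSET() invocation emits
a marker that the build scrapes into asm-offsets.h, roughly as follows
(from memory, not part of this patch):

    /* Each DEFINE() emits "->sym value" into the compiler's asm
     * output; the build then turns these lines into #defines. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

Hence the rename to SYSENTER_stack_esp0 changes the constant entry.S
uses to find esp0 from the sysenter stack, and a Xen build defines it
as 0 because there the sysenter stack points directly at esp0.
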
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/cpu/common-xen.c b/arch/i386/kernel/cpu/common-xen.c
+--- a/arch/i386/kernel/cpu/common-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/cpu/common-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,868 @@
++#include <linux/init.h>
++#include <linux/string.h>
++#include <linux/delay.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <linux/bootmem.h>
++#include <asm/semaphore.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/msr.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
++#include <asm/mtrr.h>
++#include <asm/mce.h>
++#ifdef CONFIG_X86_LOCAL_APIC
++#include <asm/mpspec.h>
++#include <asm/apic.h>
++#include <mach_apic.h>
++#endif
++#include <asm/pda.h>
++#include <asm/hypervisor.h>
 +
-+	/*
-+	 * stash over-ride to indicate we've been here
-+	 * and for later update of acpi_fadt
-+	 */
-+	acpi_sci_override_gsi = gsi;
-+	return;
-+}
++#include "cpu.h"
 +
-+static int __init
-+acpi_parse_int_src_ovr(acpi_table_entry_header * header,
-+		       const unsigned long end)
-+{
-+	struct acpi_table_int_src_ovr *intsrc = NULL;
++DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
++EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
 +
-+	intsrc = (struct acpi_table_int_src_ovr *)header;
++struct i386_pda *_cpu_pda[NR_CPUS] __read_mostly;
++EXPORT_SYMBOL(_cpu_pda);
 +
-+	if (BAD_MADT_ENTRY(intsrc, end))
-+		return -EINVAL;
++static int cachesize_override __cpuinitdata = -1;
++static int disable_x86_fxsr __cpuinitdata;
++static int disable_x86_serial_nr __cpuinitdata = 1;
++static int disable_x86_sep __cpuinitdata;
 +
-+	acpi_table_print_madt_entry(header);
++struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
 +
-+	if (intsrc->bus_irq == acpi_fadt.sci_int) {
-+		acpi_sci_ioapic_setup(intsrc->global_irq,
-+				      intsrc->flags.polarity,
-+				      intsrc->flags.trigger);
-+		return 0;
-+	}
++extern int disable_pse;
 +
-+	if (acpi_skip_timer_override &&
-+	    intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
-+		printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
-+		return 0;
++static void __cpuinit default_init(struct cpuinfo_x86 * c)
++{
++	/* Not much we can do here... */
++	/* Check if at least it has cpuid */
++	if (c->cpuid_level == -1) {
++		/* No cpuid. It must be an ancient CPU */
++		if (c->x86 == 4)
++			strcpy(c->x86_model_id, "486");
++		else if (c->x86 == 3)
++			strcpy(c->x86_model_id, "386");
 +	}
++}
 +
-+	mp_override_legacy_irq(intsrc->bus_irq,
-+			       intsrc->flags.polarity,
-+			       intsrc->flags.trigger, intsrc->global_irq);
++static struct cpu_dev __cpuinitdata default_cpu = {
++	.c_init	= default_init,
++	.c_vendor = "Unknown",
++};
++static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
 +
-+	return 0;
++static int __init cachesize_setup(char *str)
++{
++	get_option (&str, &cachesize_override);
++	return 1;
 +}
++__setup("cachesize=", cachesize_setup);
 +
-+static int __init
-+acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
++int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 +{
-+	struct acpi_table_nmi_src *nmi_src = NULL;
-+
-+	nmi_src = (struct acpi_table_nmi_src *)header;
++	unsigned int *v;
++	char *p, *q;
 +
-+	if (BAD_MADT_ENTRY(nmi_src, end))
-+		return -EINVAL;
++	if (cpuid_eax(0x80000000) < 0x80000004)
++		return 0;
 +
-+	acpi_table_print_madt_entry(header);
++	v = (unsigned int *) c->x86_model_id;
++	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
++	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
++	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
++	c->x86_model_id[48] = 0;
 +
-+	/* TBD: Support nimsrc entries? */
++	/* Intel chips right-justify this string for some dumb reason;
++	   undo that brain damage */
++	p = q = &c->x86_model_id[0];
++	while ( *p == ' ' )
++	     p++;
++	if ( p != q ) {
++	     while ( *p )
++		  *q++ = *p++;
++	     while ( q <= &c->x86_model_id[48] )
++		  *q++ = '\0';	/* Zero-pad the rest */
++	}
 +
-+	return 0;
++	return 1;
 +}
 +
-+#endif				/* CONFIG_X86_IO_APIC */
-+
-+/*
-+ * acpi_pic_sci_set_trigger()
-+ * 
-+ * use ELCR to set PIC-mode trigger type for SCI
-+ *
-+ * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
-+ * it may require Edge Trigger -- use "acpi_sci=edge"
-+ *
-+ * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
-+ * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
-+ * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
-+ * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
-+ */
 +
-+void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
++void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 +{
-+	unsigned int mask = 1 << irq;
-+	unsigned int old, new;
-+
-+	/* Real old ELCR mask */
-+	old = inb(0x4d0) | (inb(0x4d1) << 8);
++	unsigned int n, dummy, ecx, edx, l2size;
 +
-+	/*
-+	 * If we use ACPI to set PCI irq's, then we should clear ELCR
-+	 * since we will set it correctly as we enable the PCI irq
-+	 * routing.
-+	 */
-+	new = acpi_noirq ? old : 0;
++	n = cpuid_eax(0x80000000);
 +
-+	/*
-+	 * Update SCI information in the ELCR, it isn't in the PCI
-+	 * routing tables..
-+	 */
-+	switch (trigger) {
-+	case 1:		/* Edge - clear */
-+		new &= ~mask;
-+		break;
-+	case 3:		/* Level - set */
-+		new |= mask;
-+		break;
++	if (n >= 0x80000005) {
++		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
++		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
++			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++		c->x86_cache_size=(ecx>>24)+(edx>>24);	
 +	}
 +
-+	if (old == new)
++	if (n < 0x80000006)	/* Some chips just has a large L1. */
 +		return;
 +
-+	printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
-+	outb(new, 0x4d0);
-+	outb(new >> 8, 0x4d1);
-+}
-+
-+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
-+{
-+#ifdef CONFIG_X86_IO_APIC
-+	if (use_pci_vector() && !platform_legacy_irq(gsi))
-+		*irq = IO_APIC_VECTOR(gsi);
-+	else
-+#endif
-+		*irq = gsi_irq_sharing(gsi);
-+	return 0;
-+}
++	ecx = cpuid_ecx(0x80000006);
++	l2size = ecx >> 16;
++	
++	/* do processor-specific cache resizing */
++	if (this_cpu->c_size_cache)
++		l2size = this_cpu->c_size_cache(c,l2size);
 +
-+/*
-+ * success: return IRQ number (>=0)
-+ * failure: return < 0
-+ */
-+int acpi_register_gsi(u32 gsi, int triggering, int polarity)
-+{
-+	unsigned int irq;
-+	unsigned int plat_gsi = gsi;
++	/* Allow user to override all this if necessary. */
++	if (cachesize_override != -1)
++		l2size = cachesize_override;
 +
-+#ifdef CONFIG_PCI
-+	/*
-+	 * Make sure all (legacy) PCI IRQs are set as level-triggered.
-+	 */
-+	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
-+		extern void eisa_set_level_irq(unsigned int irq);
++	if ( l2size == 0 )
++		return;		/* Again, no L2 cache is possible */
 +
-+		if (triggering == ACPI_LEVEL_SENSITIVE)
-+			eisa_set_level_irq(gsi);
-+	}
-+#endif
++	c->x86_cache_size = l2size;
 +
-+#ifdef CONFIG_X86_IO_APIC
-+	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
-+		plat_gsi = mp_register_gsi(gsi, triggering, polarity);
-+	}
-+#endif
-+	acpi_gsi_to_irq(plat_gsi, &irq);
-+	return irq;
++	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
++	       l2size, ecx & 0xFF);
 +}
 +
-+EXPORT_SYMBOL(acpi_register_gsi);
++/* Naming convention should be: <Name> [(<Codename>)] */
++/* This table only is used unless init_<vendor>() below doesn't set it; */
++/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
 +
-+/*
-+ *  ACPI based hotplug support for CPU
-+ */
-+#ifdef CONFIG_ACPI_HOTPLUG_CPU
-+int acpi_map_lsapic(acpi_handle handle, int *pcpu)
++/* Look up CPU names by table lookup. */
++static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 +{
-+	/* TBD */
-+	return -EINVAL;
-+}
++	struct cpu_model_info *info;
 +
-+EXPORT_SYMBOL(acpi_map_lsapic);
++	if ( c->x86_model >= 16 )
++		return NULL;	/* Range check */
 +
-+int acpi_unmap_lsapic(int cpu)
-+{
-+	/* TBD */
-+	return -EINVAL;
-+}
++	if (!this_cpu)
++		return NULL;
 +
-+EXPORT_SYMBOL(acpi_unmap_lsapic);
-+#endif				/* CONFIG_ACPI_HOTPLUG_CPU */
++	info = this_cpu->c_models;
 +
-+int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
-+{
-+	/* TBD */
-+	return -EINVAL;
++	while (info && info->family) {
++		if (info->family == c->x86)
++			return info->model_names[c->x86_model];
++		info++;
++	}
++	return NULL;		/* Not found */
 +}
 +
-+EXPORT_SYMBOL(acpi_register_ioapic);
 +
-+int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
++static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 +{
-+	/* TBD */
-+	return -EINVAL;
++	char *v = c->x86_vendor_id;
++	int i;
++	static int printed;
++
++	for (i = 0; i < X86_VENDOR_NUM; i++) {
++		if (cpu_devs[i]) {
++			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
++			    (cpu_devs[i]->c_ident[1] && 
++			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
++				c->x86_vendor = i;
++				if (!early)
++					this_cpu = cpu_devs[i];
++				return;
++			}
++		}
++	}
++	if (!printed) {
++		printed++;
++		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
++		printk(KERN_ERR "CPU: Your system may be unstable.\n");
++	}
++	c->x86_vendor = X86_VENDOR_UNKNOWN;
++	this_cpu = &default_cpu;
 +}
 +
-+EXPORT_SYMBOL(acpi_unregister_ioapic);
 +
-+static unsigned long __init
-+acpi_scan_rsdp(unsigned long start, unsigned long length)
++static int __init x86_fxsr_setup(char * s)
 +{
-+	unsigned long offset = 0;
-+	unsigned long sig_len = sizeof("RSD PTR ") - 1;
-+	unsigned long vstart = (unsigned long)isa_bus_to_virt(start);
++	/* Tell all the other CPU's to not use it... */
++	disable_x86_fxsr = 1;
 +
 +	/*
-+	 * Scan all 16-byte boundaries of the physical memory region for the
-+	 * RSDP signature.
++	 * ... and clear the bits early in the boot_cpu_data
++	 * so that the bootup process doesn't try to do this
++	 * either.
 +	 */
-+	for (offset = 0; offset < length; offset += 16) {
-+		if (strncmp((char *)(vstart + offset), "RSD PTR ", sig_len))
-+			continue;
-+		return (start + offset);
-+	}
-+
-+	return 0;
++	clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
++	clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
++	return 1;
 +}
++__setup("nofxsr", x86_fxsr_setup);
 +
-+static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
-+{
-+	struct acpi_table_sbf *sb;
-+
-+	if (!phys_addr || !size)
-+		return -EINVAL;
-+
-+	sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
-+	if (!sb) {
-+		printk(KERN_WARNING PREFIX "Unable to map SBF\n");
-+		return -ENODEV;
-+	}
-+
-+	sbf_port = sb->sbf_cmos;	/* Save CMOS port */
 +
-+	return 0;
++static int __init x86_sep_setup(char * s)
++{
++	disable_x86_sep = 1;
++	return 1;
 +}
++__setup("nosep", x86_sep_setup);
 +
-+#ifdef CONFIG_HPET_TIMER
 +
-+static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
++/* Standard macro to see if a specific flag is changeable */
++static inline int flag_is_changeable_p(u32 flag)
 +{
-+	struct acpi_table_hpet *hpet_tbl;
++	u32 f1, f2;
 +
-+	if (!phys || !size)
-+		return -EINVAL;
++	asm("pushfl\n\t"
++	    "pushfl\n\t"
++	    "popl %0\n\t"
++	    "movl %0,%1\n\t"
++	    "xorl %2,%0\n\t"
++	    "pushl %0\n\t"
++	    "popfl\n\t"
++	    "pushfl\n\t"
++	    "popl %0\n\t"
++	    "popfl\n\t"
++	    : "=&r" (f1), "=&r" (f2)
++	    : "ir" (flag));
 +
-+	hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
-+	if (!hpet_tbl) {
-+		printk(KERN_WARNING PREFIX "Unable to map HPET\n");
-+		return -ENODEV;
-+	}
++	return ((f1^f2) & flag) != 0;
++}
 +
-+	if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
-+		printk(KERN_WARNING PREFIX "HPET timers must be located in "
-+		       "memory.\n");
-+		return -1;
-+	}
-+#ifdef	CONFIG_X86_64
-+	vxtime.hpet_address = hpet_tbl->addr.addrl |
-+	    ((long)hpet_tbl->addr.addrh << 32);
-+
-+	printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
-+	       hpet_tbl->id, vxtime.hpet_address);
-+#else				/* X86 */
-+	{
-+		extern unsigned long hpet_address;
 +
-+		hpet_address = hpet_tbl->addr.addrl;
-+		printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
-+		       hpet_tbl->id, hpet_address);
-+	}
-+#endif				/* X86 */
++/* Probe for the CPUID instruction */
++static int __cpuinit have_cpuid_p(void)
++{
++	return flag_is_changeable_p(X86_EFLAGS_ID);
++}
 +
-+	return 0;
++void __init cpu_detect(struct cpuinfo_x86 *c)
++{
++	/* Get vendor name */
++	cpuid(0x00000000, &c->cpuid_level,
++	      (int *)&c->x86_vendor_id[0],
++	      (int *)&c->x86_vendor_id[8],
++	      (int *)&c->x86_vendor_id[4]);
++
++	c->x86 = 4;
++	if (c->cpuid_level >= 0x00000001) {
++		u32 junk, tfms, cap0, misc;
++		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
++		c->x86 = (tfms >> 8) & 15;
++		c->x86_model = (tfms >> 4) & 15;
++		if (c->x86 == 0xf)
++			c->x86 += (tfms >> 20) & 0xff;
++		if (c->x86 >= 0x6)
++			c->x86_model += ((tfms >> 16) & 0xF) << 4;
++		c->x86_mask = tfms & 15;
++		if (cap0 & (1<<19))
++			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
++	}
 +}
-+#else
-+#define	acpi_parse_hpet	NULL
-+#endif
 +
-+#ifdef CONFIG_X86_PM_TIMER
-+extern u32 pmtmr_ioport;
-+#endif
++/* Do minimum CPU detection early.
++   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
++   The others are not touched to avoid unwanted side effects.
 +
-+static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
++   WARNING: this function is only called on the BP.  Don't add code here
++   that is supposed to run on all CPUs. */
++static void __init early_cpu_detect(void)
 +{
-+	struct fadt_descriptor *fadt = NULL;
++	struct cpuinfo_x86 *c = &boot_cpu_data;
 +
-+	fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
-+	if (!fadt) {
-+		printk(KERN_WARNING PREFIX "Unable to map FADT\n");
-+		return 0;
-+	}
-+	/* initialize sci_int early for INT_SRC_OVR MADT parsing */
-+	acpi_fadt.sci_int = fadt->sci_int;
++	c->x86_cache_alignment = 32;
 +
-+	/* initialize rev and apic_phys_dest_mode for x86_64 genapic */
-+	acpi_fadt.revision = fadt->revision;
-+	acpi_fadt.force_apic_physical_destination_mode =
-+	    fadt->force_apic_physical_destination_mode;
++	if (!have_cpuid_p())
++		return;
 +
-+#if defined(CONFIG_X86_PM_TIMER) && !defined(CONFIG_XEN)
-+	/* detect the location of the ACPI PM Timer */
-+	if (fadt->revision >= FADT2_REVISION_ID) {
-+		/* FADT rev. 2 */
-+		if (fadt->xpm_tmr_blk.address_space_id !=
-+		    ACPI_ADR_SPACE_SYSTEM_IO)
-+			return 0;
++	cpu_detect(c);
 +
-+		pmtmr_ioport = fadt->xpm_tmr_blk.address;
-+		/*
-+		 * "X" fields are optional extensions to the original V1.0
-+		 * fields, so we must selectively expand V1.0 fields if the
-+		 * corresponding X field is zero.
-+	 	 */
-+		if (!pmtmr_ioport)
-+			pmtmr_ioport = fadt->V1_pm_tmr_blk;
-+	} else {
-+		/* FADT rev. 1 */
-+		pmtmr_ioport = fadt->V1_pm_tmr_blk;
++	get_cpu_vendor(c, 1);
++}
++
++static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
++{
++	u32 tfms, xlvl;
++	int ebx;
++
++	if (have_cpuid_p()) {
++		/* Get vendor name */
++		cpuid(0x00000000, &c->cpuid_level,
++		      (int *)&c->x86_vendor_id[0],
++		      (int *)&c->x86_vendor_id[8],
++		      (int *)&c->x86_vendor_id[4]);
++		
++		get_cpu_vendor(c, 0);
++		/* Initialize the standard set of capabilities */
++		/* Note that the vendor-specific code below might override */
++	
++		/* Intel-defined flags: level 0x00000001 */
++		if ( c->cpuid_level >= 0x00000001 ) {
++			u32 capability, excap;
++			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
++			c->x86_capability[0] = capability;
++			c->x86_capability[4] = excap;
++			c->x86 = (tfms >> 8) & 15;
++			c->x86_model = (tfms >> 4) & 15;
++			if (c->x86 == 0xf)
++				c->x86 += (tfms >> 20) & 0xff;
++			if (c->x86 >= 0x6)
++				c->x86_model += ((tfms >> 16) & 0xF) << 4;
++			c->x86_mask = tfms & 15;
++#ifdef CONFIG_X86_HT
++			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
++#else
++			c->apicid = (ebx >> 24) & 0xFF;
++#endif
++			if (c->x86_capability[0] & (1<<19))
++				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
++		} else {
++			/* Have CPUID level 0 only - unheard of */
++			c->x86 = 4;
++		}
++
++		/* AMD-defined flags: level 0x80000001 */
++		xlvl = cpuid_eax(0x80000000);
++		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
++			if ( xlvl >= 0x80000001 ) {
++				c->x86_capability[1] = cpuid_edx(0x80000001);
++				c->x86_capability[6] = cpuid_ecx(0x80000001);
++			}
++			if ( xlvl >= 0x80000004 )
++				get_model_name(c); /* Default name */
++		}
 +	}
-+	if (pmtmr_ioport)
-+		printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
-+		       pmtmr_ioport);
++
++	early_intel_workaround(c);
++
++#ifdef CONFIG_X86_HT
++	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
 +#endif
-+	return 0;
 +}
 +
-+unsigned long __init acpi_find_rsdp(void)
++static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 +{
-+	unsigned long rsdp_phys = 0;
++	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
++		/* Disable processor serial number */
++		unsigned long lo,hi;
++		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
++		lo |= 0x200000;
++		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
++		printk(KERN_NOTICE "CPU serial number disabled.\n");
++		clear_bit(X86_FEATURE_PN, c->x86_capability);
 +
-+	if (efi_enabled) {
-+		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
-+			return efi.acpi20;
-+		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
-+			return efi.acpi;
++		/* Disabling the serial number may affect the cpuid level */
++		c->cpuid_level = cpuid_eax(0);
 +	}
-+	/*
-+	 * Scan memory looking for the RSDP signature. First search EBDA (low
-+	 * memory) paragraphs and then search upper memory (E0000-FFFFF).
-+	 */
-+	rsdp_phys = acpi_scan_rsdp(0, 0x400);
-+	if (!rsdp_phys)
-+		rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
++}
 +
-+	return rsdp_phys;
++static int __init x86_serial_nr_setup(char *s)
++{
++	disable_x86_serial_nr = 0;
++	return 1;
 +}
++__setup("serialnumber", x86_serial_nr_setup);
++
++
 +
-+#ifdef	CONFIG_X86_LOCAL_APIC
 +/*
-+ * Parse LAPIC entries in MADT
-+ * returns 0 on success, < 0 on error
++ * This does the hard work of actually picking apart the CPU stuff...
 + */
-+static int __init acpi_parse_madt_lapic_entries(void)
++void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 +{
-+	int count;
-+
-+	if (!cpu_has_apic)
-+		return -ENODEV;
++	int i;
 +
-+	/* 
-+	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
-+	 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
-+	 */
++	c->loops_per_jiffy = loops_per_jiffy;
++	c->x86_cache_size = -1;
++	c->x86_vendor = X86_VENDOR_UNKNOWN;
++	c->cpuid_level = -1;	/* CPUID not detected */
++	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
++	c->x86_vendor_id[0] = '\0'; /* Unset */
++	c->x86_model_id[0] = '\0';  /* Unset */
++	c->x86_max_cores = 1;
++	c->x86_clflush_size = 32;
++	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 +
-+	count =
-+	    acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
-+				  acpi_parse_lapic_addr_ovr, 0);
-+	if (count < 0) {
-+		printk(KERN_ERR PREFIX
-+		       "Error parsing LAPIC address override entry\n");
-+		return count;
++	if (!have_cpuid_p()) {
++		/* First of all, decide if this is a 486 or higher */
++		/* It's a 486 if we can modify the AC flag */
++		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
++			c->x86 = 4;
++		else
++			c->x86 = 3;
 +	}
 +
-+	mp_register_lapic_address(acpi_lapic_addr);
++	generic_identify(c);
 +
-+	count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
-+				      MAX_APICS);
-+	if (!count) {
-+		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
-+		/* TBD: Cleanup to allow fallback to MPS */
-+		return -ENODEV;
-+	} else if (count < 0) {
-+		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
-+		/* TBD: Cleanup to allow fallback to MPS */
-+		return count;
-+	}
++	printk(KERN_DEBUG "CPU: After generic identify, caps:");
++	for (i = 0; i < NCAPINTS; i++)
++		printk(" %08lx", c->x86_capability[i]);
++	printk("\n");
 +
-+	count =
-+	    acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
-+	if (count < 0) {
-+		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
-+		/* TBD: Cleanup to allow fallback to MPS */
-+		return count;
-+	}
-+	return 0;
-+}
-+#endif				/* CONFIG_X86_LOCAL_APIC */
++	if (this_cpu->c_identify) {
++		this_cpu->c_identify(c);
 +
-+#ifdef	CONFIG_X86_IO_APIC
-+/*
-+ * Parse IOAPIC related entries in MADT
-+ * returns 0 on success, < 0 on error
-+ */
-+static int __init acpi_parse_madt_ioapic_entries(void)
-+{
-+	int count;
++		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
++		for (i = 0; i < NCAPINTS; i++)
++			printk(" %08lx", c->x86_capability[i]);
++		printk("\n");
++	}
 +
 +	/*
-+	 * ACPI interpreter is required to complete interrupt setup,
-+	 * so if it is off, don't enumerate the io-apics with ACPI.
-+	 * If MPS is present, it will handle them,
-+	 * otherwise the system will stay in PIC mode
++	 * Vendor-specific initialization.  In this section we
++	 * canonicalize the feature flags, meaning if there are
++	 * features a certain CPU supports which CPUID doesn't
++	 * tell us, CPUID claiming incorrect flags, or other bugs,
++	 * we handle them here.
++	 *
++	 * At the end of this section, c->x86_capability better
++	 * indicate the features this CPU genuinely supports!
 +	 */
-+	if (acpi_disabled || acpi_noirq) {
-+		return -ENODEV;
-+	}
++	if (this_cpu->c_init)
++		this_cpu->c_init(c);
 +
-+	if (!cpu_has_apic) 
-+		return -ENODEV;
++	/* Disable the PN if appropriate */
++	squash_the_stupid_serial_number(c);
 +
 +	/*
-+	 * if "noapic" boot option, don't look for IO-APICs
++	 * The vendor-specific functions might have changed features.  Now
++	 * we do "generic changes."
 +	 */
-+	if (skip_ioapic_setup) {
-+		printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
-+		       "due to 'noapic' option.\n");
-+		return -ENODEV;
-+	}
 +
-+	count =
-+	    acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
-+				  MAX_IO_APICS);
-+	if (!count) {
-+		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
-+		return -ENODEV;
-+	} else if (count < 0) {
-+		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
-+		return count;
++	/* TSC disabled? */
++	if ( tsc_disable )
++		clear_bit(X86_FEATURE_TSC, c->x86_capability);
++
++	/* FXSR disabled? */
++	if (disable_x86_fxsr) {
++		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
++		clear_bit(X86_FEATURE_XMM, c->x86_capability);
 +	}
 +
-+	count =
-+	    acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
-+				  NR_IRQ_VECTORS);
-+	if (count < 0) {
-+		printk(KERN_ERR PREFIX
-+		       "Error parsing interrupt source overrides entry\n");
-+		/* TBD: Cleanup to allow fallback to MPS */
-+		return count;
++	/* SEP disabled? */
++	if (disable_x86_sep)
++		clear_bit(X86_FEATURE_SEP, c->x86_capability);
++
++	if (disable_pse)
++		clear_bit(X86_FEATURE_PSE, c->x86_capability);
++
++	/* If the model name is still unset, do table lookup. */
++	if ( !c->x86_model_id[0] ) {
++		char *p;
++		p = table_lookup_model(c);
++		if ( p )
++			strcpy(c->x86_model_id, p);
++		else
++			/* Last resort... */
++			sprintf(c->x86_model_id, "%02x/%02x",
++				c->x86, c->x86_model);
 +	}
 +
++	/* Now the feature flags better reflect actual CPU features! */
++
++	printk(KERN_DEBUG "CPU: After all inits, caps:");
++	for (i = 0; i < NCAPINTS; i++)
++		printk(" %08lx", c->x86_capability[i]);
++	printk("\n");
++
 +	/*
-+	 * If BIOS did not supply an INT_SRC_OVR for the SCI
-+	 * pretend we got one so we can set the SCI flags.
++	 * On SMP, boot_cpu_data holds the common feature set between
++	 * all CPUs; so make sure that we indicate which features are
++	 * common between the CPUs.  The first time this routine gets
++	 * executed, c == &boot_cpu_data.
 +	 */
-+	if (!acpi_sci_override_gsi)
-+		acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
++	if ( c != &boot_cpu_data ) {
++		/* AND the already accumulated flags with these */
++		for ( i = 0 ; i < NCAPINTS ; i++ )
++			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++	}
 +
-+	/* Fill in identity legacy mapings where no override */
-+	mp_config_acpi_legacy_irqs();
++	/* Init Machine Check Exception if available. */
++	mcheck_init(c);
 +
-+	count =
-+	    acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
-+				  NR_IRQ_VECTORS);
-+	if (count < 0) {
-+		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
-+		/* TBD: Cleanup to allow fallback to MPS */
-+		return count;
-+	}
++	if (c == &boot_cpu_data)
++		sysenter_setup();
++	enable_sep_cpu();
 +
-+	return 0;
-+}
-+#else
-+static inline int acpi_parse_madt_ioapic_entries(void)
-+{
-+	return -1;
++	if (c == &boot_cpu_data)
++		mtrr_bp_init();
++	else
++		mtrr_ap_init();
 +}
-+#endif	/* !CONFIG_X86_IO_APIC */
 +
-+static void __init acpi_process_madt(void)
++#ifdef CONFIG_X86_HT
++void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 +{
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	int count, error;
++	u32 	eax, ebx, ecx, edx;
++	int 	index_msb, core_bits;
 +
-+	count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
-+	if (count >= 1) {
++	cpuid(1, &eax, &ebx, &ecx, &edx);
 +
-+		/*
-+		 * Parse MADT LAPIC entries
-+		 */
-+		error = acpi_parse_madt_lapic_entries();
-+		if (!error) {
-+			acpi_lapic = 1;
++	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
++		return;
 +
-+#ifdef CONFIG_X86_GENERICARCH
-+			generic_bigsmp_probe();
-+#endif
-+			/*
-+			 * Parse MADT IO-APIC entries
-+			 */
-+			error = acpi_parse_madt_ioapic_entries();
-+			if (!error) {
-+				acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
-+				acpi_irq_balance_set(NULL);
-+				acpi_ioapic = 1;
++	smp_num_siblings = (ebx & 0xff0000) >> 16;
 +
-+				smp_found_config = 1;
-+				clustered_apic_check();
-+			}
-+		}
-+		if (error == -EINVAL) {
-+			/*
-+			 * Dell Precision Workstation 410, 610 come here.
-+			 */
-+			printk(KERN_ERR PREFIX
-+			       "Invalid BIOS MADT, disabling ACPI\n");
-+			disable_acpi();
++	if (smp_num_siblings == 1) {
++		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
++	} else if (smp_num_siblings > 1 ) {
++
++		if (smp_num_siblings > NR_CPUS) {
++			printk(KERN_WARNING "CPU: Unsupported number of the "
++					"siblings %d", smp_num_siblings);
++			smp_num_siblings = 1;
++			return;
 +		}
-+	}
-+#endif
-+	return;
-+}
 +
-+extern int acpi_force;
++		index_msb = get_count_order(smp_num_siblings);
++		c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
 +
-+#ifdef __i386__
++		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
++		       c->phys_proc_id);
 +
-+static int __init disable_acpi_irq(struct dmi_system_id *d)
-+{
-+	if (!acpi_force) {
-+		printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
-+		       d->ident);
-+		acpi_noirq_set();
-+	}
-+	return 0;
-+}
++		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 +
-+static int __init disable_acpi_pci(struct dmi_system_id *d)
-+{
-+	if (!acpi_force) {
-+		printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
-+		       d->ident);
-+		acpi_disable_pci();
-+	}
-+	return 0;
-+}
++		index_msb = get_count_order(smp_num_siblings);
 +
-+static int __init dmi_disable_acpi(struct dmi_system_id *d)
-+{
-+	if (!acpi_force) {
-+		printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
-+		disable_acpi();
-+	} else {
-+		printk(KERN_NOTICE
-+		       "Warning: DMI blacklist says broken, but acpi forced\n");
++		core_bits = get_count_order(c->x86_max_cores);
++
++		c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
++					       ((1 << core_bits) - 1);
++
++		if (c->x86_max_cores > 1)
++			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
++			       c->cpu_core_id);
 +	}
-+	return 0;
 +}
++#endif
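To make the sibling/core arithmetic in detect_ht() concrete, here is a worked example with hypothetical values (illustrative only, not part of the patch); on the default i386 subarchitecture, phys_pkg_id() reduces to a right shift of the initial APIC ID:

#include <stdio.h>

/* ceil(log2(count)), as the kernel's get_count_order() computes */
static int get_count_order(unsigned int count)
{
	int order = 0;

	while ((1u << order) < count)
		order++;
	return order;
}

int main(void)
{
	unsigned int apicid = 5;    /* hypothetical initial APIC ID (CPUID.1 EBX[31:24]) */
	unsigned int siblings = 4;  /* logical CPUs per package (CPUID.1 EBX[23:16]) */
	unsigned int max_cores = 2; /* c->x86_max_cores */

	int index_msb = get_count_order(siblings);                /* 2 */
	unsigned int phys_proc_id = apicid >> index_msb;          /* 1 */

	index_msb = get_count_order(siblings / max_cores);        /* 1 */
	int core_bits = get_count_order(max_cores);               /* 1 */
	unsigned int cpu_core_id =
		(apicid >> index_msb) & ((1u << core_bits) - 1);  /* 0 */

	printf("package %u, core %u\n", phys_proc_id, cpu_core_id);
	return 0;
}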
 +
-+/*
-+ * Limit ACPI to CPU enumeration for HT
-+ */
-+static int __init force_acpi_ht(struct dmi_system_id *d)
++void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 +{
-+	if (!acpi_force) {
-+		printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
-+		       d->ident);
-+		disable_acpi();
-+		acpi_ht = 1;
-+	} else {
-+		printk(KERN_NOTICE
-+		       "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
-+	}
-+	return 0;
-+}
++	char *vendor = NULL;
 +
-+/*
-+ * If your system is blacklisted here, but you find that acpi=force
-+ * works for you, please contact acpi-devel at sourceforge.net
-+ */
-+static struct dmi_system_id __initdata acpi_dmi_table[] = {
-+	/*
-+	 * Boxes that need ACPI disabled
-+	 */
-+	{
-+	 .callback = dmi_disable_acpi,
-+	 .ident = "IBM Thinkpad",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+		     DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
-+		     },
-+	 },
++	if (c->x86_vendor < X86_VENDOR_NUM)
++		vendor = this_cpu->c_vendor;
++	else if (c->cpuid_level >= 0)
++		vendor = c->x86_vendor_id;
 +
-+	/*
-+	 * Boxes that need acpi=ht
-+	 */
-+	{
-+	 .callback = force_acpi_ht,
-+	 .ident = "FSC Primergy T850",
-+	 .matches = {
-+		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
-+		     DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
-+		     },
-+	 },
-+	{
-+	 .callback = force_acpi_ht,
-+	 .ident = "DELL GX240",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
-+		     DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
-+		     },
-+	 },
-+	{
-+	 .callback = force_acpi_ht,
-+	 .ident = "HP VISUALIZE NT Workstation",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-+		     DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
-+		     },
-+	 },
-+	{
-+	 .callback = force_acpi_ht,
-+	 .ident = "Compaq Workstation W8000",
-+	 .matches = {
-+		     DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
-+		     DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
-+		     },
-+	 },
-+	{
-+	 .callback = force_acpi_ht,
-+	 .ident = "ASUS P4B266",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+		     DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
-+		     },
-+	 },
-+	{
-+	 .callback = force_acpi_ht,
-+	 .ident = "ASUS P2B-DS",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+		     DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
-+		     },
-+	 },
-+	{
-+	 .callback = force_acpi_ht,
-+	 .ident = "ASUS CUR-DLS",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+		     DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
-+		     },
-+	 },
-+	{
-+	 .callback = force_acpi_ht,
-+	 .ident = "ABIT i440BX-W83977",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
-+		     DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
-+		     },
-+	 },
-+	{
-+	 .callback = force_acpi_ht,
-+	 .ident = "IBM Bladecenter",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+		     DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
-+		     },
-+	 },
-+	{
-+	 .callback = force_acpi_ht,
-+	 .ident = "IBM eServer xSeries 360",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+		     DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
-+		     },
-+	 },
-+	{
-+	 .callback = force_acpi_ht,
-+	 .ident = "IBM eserver xSeries 330",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+		     DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
-+		     },
-+	 },
-+	{
-+	 .callback = force_acpi_ht,
-+	 .ident = "IBM eserver xSeries 440",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+		     DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
-+		     },
-+	 },
++	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
++		printk("%s ", vendor);
 +
-+	/*
-+	 * Boxes that need ACPI PCI IRQ routing disabled
-+	 */
-+	{
-+	 .callback = disable_acpi_irq,
-+	 .ident = "ASUS A7V",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
-+		     DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
-+		     /* newer BIOS, Revision 1011, does work */
-+		     DMI_MATCH(DMI_BIOS_VERSION,
-+			       "ASUS A7V ACPI BIOS Revision 1007"),
-+		     },
-+	 },
++	if (!c->x86_model_id[0])
++		printk("%d86", c->x86);
++	else
++		printk("%s", c->x86_model_id);
 +
-+	/*
-+	 * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
-+	 */
-+	{			/* _BBN 0 bug */
-+	 .callback = disable_acpi_pci,
-+	 .ident = "ASUS PR-DLS",
-+	 .matches = {
-+		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+		     DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
-+		     DMI_MATCH(DMI_BIOS_VERSION,
-+			       "ASUS PR-DLS ACPI BIOS Revision 1010"),
-+		     DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
-+		     },
-+	 },
-+	{
-+	 .callback = disable_acpi_pci,
-+	 .ident = "Acer TravelMate 36x Laptop",
-+	 .matches = {
-+		     DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-+		     DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
-+		     },
-+	 },
-+	{}
-+};
++	if (c->x86_mask || c->cpuid_level >= 0) 
++		printk(" stepping %02x\n", c->x86_mask);
++	else
++		printk("\n");
++}
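For illustration: callers typically prefix the CPU number, and since a CPUID brand string such as "Intel(R) Pentium(R) 4 CPU 2.40GHz" already begins with the vendor name, the separate vendor prefix is skipped, giving a boot line like (hypothetical machine):

    CPU0: Intel(R) Pentium(R) 4 CPU 2.40GHz stepping 07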
 +
-+#endif				/* __i386__ */
++cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 +
-+/*
-+ * acpi_boot_table_init() and acpi_boot_init()
-+ *  called from setup_arch(), always.
-+ *	1. checksums all tables
-+ *	2. enumerates lapics
-+ *	3. enumerates io-apics
-+ *
-+ * acpi_table_init() is separate to allow reading SRAT without
-+ * other side effects.
-+ *
-+ * side effects of acpi_boot_init:
-+ *	acpi_lapic = 1 if LAPIC found
-+ *	acpi_ioapic = 1 if IOAPIC found
-+ *	if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
-+ *	if acpi_blacklisted() acpi_disabled = 1;
-+ *	acpi_irq_model=...
-+ *	...
-+ *
-+ * return value: (currently ignored)
-+ *	0: success
-+ *	!0: failure
++/* This is hacky. :)
++ * We're emulating future behavior.
++ * In the future, the cpu-specific init functions will be called implicitly
++ * via the magic of initcalls.
++ * They will insert themselves into the cpu_devs structure.
++ * Then, when cpu_init() is called, we can just iterate over that array.
 + */
 +
-+int __init acpi_boot_table_init(void)
++extern int intel_cpu_init(void);
++extern int cyrix_init_cpu(void);
++extern int nsc_init_cpu(void);
++extern int amd_init_cpu(void);
++extern int centaur_init_cpu(void);
++extern int transmeta_init_cpu(void);
++extern int rise_init_cpu(void);
++extern int nexgen_init_cpu(void);
++extern int umc_init_cpu(void);
++
++void __init early_cpu_init(void)
 +{
-+	int error;
++	intel_cpu_init();
++	cyrix_init_cpu();
++	nsc_init_cpu();
++	amd_init_cpu();
++	centaur_init_cpu();
++	transmeta_init_cpu();
++	rise_init_cpu();
++	nexgen_init_cpu();
++	umc_init_cpu();
++	early_cpu_detect();
 +
-+#ifdef __i386__
-+	dmi_check_system(acpi_dmi_table);
++#ifdef CONFIG_DEBUG_PAGEALLOC
++	/* pse is not compatible with on-the-fly unmapping,
++	 * disable it even if the cpus claim to support it.
++	 */
++	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++	disable_pse = 1;
 +#endif
++}
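A minimal sketch of the initcall registration that the "emulating future behavior" comment above anticipates; the vendor name and cpu_dev instance here are hypothetical, only cpu_devs[] and core_initcall() are real:

/* Hypothetical vendor module, sketch only: with initcalls, each vendor
 * registers itself during boot instead of being called by hand from
 * early_cpu_init() above. */
static struct cpu_dev acme_cpu_dev = {
	.c_vendor = "Acme",
	.c_ident  = { "AcmeCPU" },
};

static int __init acme_init_cpu(void)
{
	cpu_devs[X86_VENDOR_UNKNOWN] = &acme_cpu_dev; /* illustrative slot */
	return 0;
}
core_initcall(acme_init_cpu);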
 +
-+	/*
-+	 * If acpi_disabled, bail out
-+	 * One exception: acpi=ht continues far enough to enumerate LAPICs
-+	 */
-+	if (acpi_disabled && !acpi_ht)
-+		return 1;
++/* We can't move load_gdt to asm/desc.h because that header lacks the
++   make_lowmem_page_readonly() definition, and this is still the only
++   user of load_gdt under Xen.
++   ToDo: JQ
++ */
 +
-+	/* 
-+	 * Initialize the ACPI boot-time table parser.
-+	 */
-+	error = acpi_table_init();
-+	if (error) {
-+		disable_acpi();
-+		return error;
++#ifdef CONFIG_XEN
++#undef load_gdt
++static void __cpuinit load_gdt(struct Xgt_desc_struct *gdt_descr)
++{
++	unsigned long frames[16];
++	unsigned long va;
++	int f;
++
++	for (va = gdt_descr->address, f = 0;
++	     va < gdt_descr->address + gdt_descr->size;
++	     va += PAGE_SIZE, f++) {
++		frames[f] = virt_to_mfn(va);
++		make_lowmem_page_readonly(
++			(void *)va, XENFEAT_writable_descriptor_tables);
 +	}
++	if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
++		BUG();
++}
++#endif /* CONFIG_XEN */
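For context, the contract behind the hypercall used above, assuming the standard Xen 3.x interface: each GDT page is handed over as a machine frame number, must be mapped read-only in the guest, and the second argument counts 8-byte descriptor entries (hence size / 8). A one-page GDT therefore registers as (sketch only, not part of the patch):

unsigned long frame = virt_to_mfn(gdt);  /* machine frame, not pseudo-physical */
make_lowmem_page_readonly(gdt, XENFEAT_writable_descriptor_tables);
if (HYPERVISOR_set_gdt(&frame, PAGE_SIZE / 8))  /* 512 entries */
	BUG();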
++
++/* Make sure %gs is initialized properly in idle threads */
++struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
++{
++	memset(regs, 0, sizeof(struct pt_regs));
++	regs->xgs = __KERNEL_PDA;
++	return regs;
++}
 +
-+	acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
++static __cpuinit int alloc_gdt(int cpu)
++{
++	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++	struct desc_struct *gdt;
++	struct i386_pda *pda;
++
++	gdt = (struct desc_struct *)cpu_gdt_descr->address;
++	pda = cpu_pda(cpu);
 +
 +	/*
-+	 * blacklist may disable ACPI entirely
++	 * This is a horrible hack to allocate the GDT.  The problem
++	 * is that cpu_init() is called really early for the boot CPU
++	 * (and hence needs bootmem) but much later for the secondary
++	 * CPUs, when bootmem will have gone away
 +	 */
-+	error = acpi_blacklisted();
-+	if (error) {
-+		if (acpi_force) {
-+			printk(KERN_WARNING PREFIX "acpi=force override\n");
-+		} else {
-+			printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
-+			disable_acpi();
-+			return error;
++	if (NODE_DATA(0)->bdata->node_bootmem_map) {
++		BUG_ON(gdt != NULL || pda != NULL);
++
++		gdt = alloc_bootmem_pages(PAGE_SIZE);
++		pda = alloc_bootmem(sizeof(*pda));
++		/* alloc_bootmem(_pages) panics on failure, so no check */
++
++		memset(gdt, 0, PAGE_SIZE);
++		memset(pda, 0, sizeof(*pda));
++#ifdef CONFIG_XEN
++		memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++		cpu_gdt_descr->size = GDT_SIZE;
++#endif
++	} else {
++		/* GDT and PDA might already have been allocated if
++		   this is a CPU hotplug re-insertion. */
++		if (gdt == NULL)
++			gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
++
++		if (pda == NULL)
++			pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu));
++
++		if (unlikely(!gdt || !pda)) {
++			free_pages((unsigned long)gdt, 0);
++			kfree(pda);
++			return 0;
 +		}
 +	}
 +
-+	return 0;
++ 	cpu_gdt_descr->address = (unsigned long)gdt;
++	cpu_pda(cpu) = pda;
++
++	return 1;
 +}
 +
-+int __init acpi_boot_init(void)
++/* Initial PDA used by boot CPU */
++struct i386_pda boot_pda = {
++	._pda = &boot_pda,
++	.cpu_number = 0,
++	.pcurrent = &init_task,
++};
++
++static inline void set_kernel_gs(void)
 +{
-+	/*
-+	 * If acpi_disabled, bail out
-+	 * One exception: acpi=ht continues far enough to enumerate LAPICs
-+	 */
-+	if (acpi_disabled && !acpi_ht)
-+		return 1;
++	/* Set %gs for this CPU's PDA.  Memory clobber is to create a
++	   barrier with respect to any PDA operations, so the compiler
++	   doesn't move any before here. */
++	asm volatile ("mov %0, %%gs" : : "r" (__KERNEL_PDA) : "memory");
++}
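Once %gs carries __KERNEL_PDA, per-CPU fields are reached with a %gs-relative load. A sketch of the accessor style this PDA scheme relies on (the real accessors live in the pda header; this one is illustrative):

/* Illustrative only: read the current CPU number through %gs. */
static inline int pda_cpu_number(void)
{
	int cpu;

	asm("movl %%gs:%c1, %0"
	    : "=r" (cpu)
	    : "i" (offsetof(struct i386_pda, cpu_number)));
	return cpu;
}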
 +
-+	acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
++/* Initialize the CPU's GDT and PDA.  The boot CPU does this for
++   itself, but secondaries find this done for them. */
++__cpuinit int init_gdt(int cpu, struct task_struct *idle)
++{
++	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++	struct desc_struct *gdt;
++	struct i386_pda *pda;
 +
-+	/*
-+	 * set sci_int and PM timer address
-+	 */
-+	acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
++	/* For non-boot CPUs, the GDT and PDA should already have been
++	   allocated. */
++	if (!alloc_gdt(cpu)) {
++		printk(KERN_CRIT "CPU%d failed to allocate GDT or PDA\n", cpu);
++		return 0;
++	}
 +
-+	/*
-+	 * Process the Multiple APIC Description Table (MADT), if present
-+	 */
-+	acpi_process_madt();
++	gdt = (struct desc_struct *)cpu_gdt_descr->address;
++	pda = cpu_pda(cpu);
 +
-+	acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
++	BUG_ON(gdt == NULL || pda == NULL);
 +
-+	return 0;
-+}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/acpi/Makefile linux-2.6.18-xen/arch/i386/kernel/acpi/Makefile
---- linux-2.6.18.3/arch/i386/kernel/acpi/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/kernel/acpi/Makefile	2006-11-19 14:26:21.000000000 +0100
-@@ -6,3 +6,7 @@
- obj-y				+= cstate.o processor.o
- endif
- 
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+obj-y := $(call cherrypickxen, $(obj-y), $(src))
-+endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/alternative.c linux-2.6.18-xen/arch/i386/kernel/alternative.c
---- linux-2.6.18.3/arch/i386/kernel/alternative.c	2006-12-06 09:06:08.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/alternative.c	2006-12-05 18:42:36.000000000 +0100
-@@ -4,7 +4,11 @@
- #include <asm/alternative.h>
- #include <asm/sections.h>
- 
-+#ifdef CONFIG_X86_64_XEN
-+static int no_replacement    = 1;
-+#else
- static int no_replacement    = 0;
-+#endif
- static int smp_alt_once      = 0;
- static int debug_alternative = 0;
- 
-@@ -151,7 +155,11 @@
- #ifdef CONFIG_X86_64
- 		/* vsyscall code is not mapped yet. resolve it manually. */
- 		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
-+#ifdef CONFIG_XEN
-+			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)phys_to_machine(__pa_symbol(&__vsyscall_0)));
-+#else
- 			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
++#ifndef CONFIG_XEN
++	/*
++	 * Initialize the per-CPU GDT with the boot GDT,
++	 * and set up the GDT descriptor:
++	 */
++	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++	cpu_gdt_descr->size = GDT_SIZE - 1;
 +#endif
- 			DPRINTK("%s: vsyscall fixup: %p => %p\n",
- 				__FUNCTION__, a->instr, instr);
- 		}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/apic-xen.c linux-2.6.18-xen/arch/i386/kernel/apic-xen.c
---- linux-2.6.18.3/arch/i386/kernel/apic-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/apic-xen.c	2006-11-19 14:26:21.000000000 +0100
-@@ -0,0 +1,160 @@
-+/*
-+ *	Local APIC handling, local APIC timers
-+ *
-+ *	(c) 1999, 2000 Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Fixes
-+ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
-+ *					thanks to Eric Gilmore
-+ *					and Rolf G. Tews
-+ *					for testing these extensively.
-+ *	Maciej W. Rozycki	:	Various updates and fixes.
-+ *	Mikael Pettersson	:	Power Management for UP-APIC.
-+ *	Pavel Machek and
-+ *	Mikael Pettersson	:	PM converted to driver model.
-+ */
 +
-+#include <linux/init.h>
 +
-+#include <linux/mm.h>
-+#include <linux/delay.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/sysdev.h>
-+#include <linux/cpu.h>
-+#include <linux/module.h>
++	if (cpu == 0)
++		pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
++			(u32 *)&gdt[GDT_ENTRY_PDA].b,
++			(unsigned long)pda, sizeof(*pda) - 1,
++			0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
++
++	memset(pda, 0, sizeof(*pda));
++	pda->_pda = pda;
++	pda->cpu_number = cpu;
++	pda->pcurrent = idle;
 +
-+#include <asm/atomic.h>
-+#include <asm/smp.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/desc.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/hpet.h>
-+#include <asm/i8253.h>
-+#include <asm/nmi.h>
++	return 1;
++}
 +
-+#include <mach_apic.h>
-+#include <mach_apicdef.h>
-+#include <mach_ipi.h>
++void __cpuinit cpu_set_gdt(int cpu)
++{
++	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 +
-+#include "io_ports.h"
++	/* Reinit these anyway, even if they've already been done (on
++	   the boot CPU, this will transition from the boot gdt+pda to
++	   the real ones). */
++	load_gdt(cpu_gdt_descr);
++	set_kernel_gs();
++}
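Taken together, the intended per-CPU bring-up order in this file is short enough to summarize; this restates the patch's own flow, it is not a new interface:

/* Boot CPU path, as wired in cpu_init() below: */
init_gdt(cpu, current);  /* allocate GDT + PDA, pack the PDA descriptor */
cpu_set_gdt(cpu);        /* load the GDT (a hypercall under Xen), set %gs */
_cpu_init(cpu, current); /* TSS/LDT, debug registers, FPU state */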
 +
-+#ifndef CONFIG_XEN
-+/*
-+ * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
-+ * IPIs in place of local APIC timers
-+ */
-+static cpumask_t timer_bcast_ipi;
++/* Common CPU init for both boot and secondary CPUs */
++static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
++{
++#ifndef CONFIG_X86_NO_TSS
++	struct tss_struct * t = &per_cpu(init_tss, cpu);
 +#endif
++	struct thread_struct *thread = &curr->thread;
 +
-+/*
-+ * Knob to control our willingness to enable the local APIC.
-+ */
-+int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
++	if (cpu_test_and_set(cpu, cpu_initialized)) {
++		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
++		for (;;) local_irq_enable();
++	}
 +
-+/*
-+ * Debug level
-+ */
-+int apic_verbosity;
++	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
 +
-+static int modern_apic(void)
-+{
-+#ifndef CONFIG_XEN
-+	unsigned int lvr, version;
-+	/* AMD systems use old APIC versions, so check the CPU */
-+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-+		boot_cpu_data.x86 >= 0xf)
-+		return 1;
-+	lvr = apic_read(APIC_LVR);
-+	version = GET_APIC_VERSION(lvr);
-+	return version >= 0x14;
-+#else
-+	return 1;
++	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
++		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++	if (tsc_disable && cpu_has_tsc) {
++		printk(KERN_NOTICE "Disabling TSC...\n");
++		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
++		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
++		set_in_cr4(X86_CR4_TSD);
++	}
++
++#ifndef CONFIG_X86_NO_IDT
++	load_idt(&idt_descr);
 +#endif
-+}
 +
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * each architecture has to answer this themselves.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+	printk("unexpected IRQ trap at vector %02x\n", irq);
 +	/*
-+	 * Currently unexpected vectors happen only on SMP and APIC.
-+	 * We _must_ ack these because every local APIC has only N
-+	 * irq slots per priority level, and a 'hanging, unacked' IRQ
-+	 * holds up an irq slot - in excessive cases (when multiple
-+	 * unexpected vectors occur) that might lock up the APIC
-+	 * completely.
-+	 * But only ack when the APIC is enabled -AK
++	 * Set up and load the per-CPU TSS and LDT
 +	 */
-+	if (cpu_has_apic)
-+		ack_APIC_irq();
-+}
++	atomic_inc(&init_mm.mm_count);
++	curr->active_mm = &init_mm;
++	if (curr->mm)
++		BUG();
++	enter_lazy_tlb(&init_mm, curr);
 +
-+int get_physical_broadcast(void)
-+{
-+	if (modern_apic())
-+		return 0xff;
-+	else
-+		return 0xf;
-+}
++	load_esp0(t, thread);
++#ifndef CONFIG_X86_NO_TSS
++	set_tss_desc(cpu,t);
++	load_TR_desc();
++#endif
++	load_LDT(&init_mm.context);
 +
-+#ifndef CONFIG_XEN
-+#ifndef CONFIG_SMP
-+static void up_apic_timer_interrupt_call(struct pt_regs *regs)
-+{
-+	int cpu = smp_processor_id();
++#ifdef CONFIG_DOUBLEFAULT
++	/* Set up doublefault TSS pointer in the GDT */
++	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
++#endif
++
++	/* Clear %fs. */
++	asm volatile ("mov %0, %%fs" : : "r" (0));
++
++	/* Clear all 6 debug registers: */
++	set_debugreg(0, 0);
++	set_debugreg(0, 1);
++	set_debugreg(0, 2);
++	set_debugreg(0, 3);
++	set_debugreg(0, 6);
++	set_debugreg(0, 7);
 +
 +	/*
-+	 * the NMI deadlock-detector uses this.
++	 * Force FPU initialization:
 +	 */
-+	per_cpu(irq_stat, cpu).apic_timer_irqs++;
-+
-+	smp_local_timer_interrupt(regs);
++	current_thread_info()->status = 0;
++	clear_used_math();
++	mxcsr_feature_mask_init();
 +}
-+#endif
 +
-+void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
++/* Entrypoint to initialize secondary CPU */
++void __cpuinit secondary_cpu_init(void)
 +{
-+	cpumask_t mask;
++	int cpu = smp_processor_id();
++	struct task_struct *curr = current;
 +
-+	cpus_and(mask, cpu_online_map, timer_bcast_ipi);
-+	if (!cpus_empty(mask)) {
-+#ifdef CONFIG_SMP
-+		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
-+#else
-+		/*
-+		 * We can directly call the apic timer interrupt handler
-+		 * in UP case. Minus all irq related functions
-+		 */
-+		up_apic_timer_interrupt_call(regs);
-+#endif
-+	}
++	_cpu_init(cpu, curr);
 +}
-+#endif
 +
-+int setup_profiling_timer(unsigned int multiplier)
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless; this function acts as a
++ * 'CPU state barrier', and nothing should get across.
++ */
++void __cpuinit cpu_init(void)
 +{
-+	return -EINVAL;
++	int cpu = smp_processor_id();
++	struct task_struct *curr = current;
++
++	/* Set up the real GDT and PDA, so we can transition from the
++	   boot versions. */
++	if (!init_gdt(cpu, curr)) {
++		/* failed to allocate something; not much we can do... */
++		for (;;)
++			local_irq_enable();
++	}
++
++	cpu_set_gdt(cpu);
++	_cpu_init(cpu, curr);
 +}
 +
-+/*
-+ * This initializes the IO-APIC and APIC hardware if this is
-+ * a UP kernel.
-+ */
-+int __init APIC_init_uniprocessor (void)
++#ifdef CONFIG_HOTPLUG_CPU
++void __cpuinit cpu_uninit(void)
 +{
-+#ifdef CONFIG_X86_IO_APIC
-+	if (smp_found_config)
-+		if (!skip_ioapic_setup && nr_ioapics)
-+			setup_IO_APIC();
-+#endif
++	int cpu = raw_smp_processor_id();
++	cpu_clear(cpu, cpu_initialized);
 +
-+	return 0;
++	/* lazy TLB state */
++	per_cpu(cpu_tlbstate, cpu).state = 0;
++	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/asm-offsets.c linux-2.6.18-xen/arch/i386/kernel/asm-offsets.c
---- linux-2.6.18.3/arch/i386/kernel/asm-offsets.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/kernel/asm-offsets.c	2006-11-19 14:26:21.000000000 +0100
-@@ -66,9 +66,14 @@
- 	OFFSET(pbe_orig_address, pbe, orig_address);
- 	OFFSET(pbe_next, pbe, next);
- 
-+#ifndef CONFIG_X86_NO_TSS
- 	/* Offset from the sysenter stack to tss.esp0 */
--	DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, esp0) -
-+	DEFINE(SYSENTER_stack_esp0, offsetof(struct tss_struct, esp0) -
- 		 sizeof(struct tss_struct));
-+#else
-+	/* sysenter stack points directly to esp0 */
-+	DEFINE(SYSENTER_stack_esp0, 0);
 +#endif
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/cpu/Makefile b/arch/i386/kernel/cpu/Makefile
+--- a/arch/i386/kernel/cpu/Makefile	2007-03-12 21:58:05.000000000 +0100
++++ b/arch/i386/kernel/cpu/Makefile	2007-03-14 10:55:14.000000000 +0100
+@@ -17,3 +17,8 @@
  
- 	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
- 	DEFINE(VDSO_PRELINK, VDSO_PRELINK);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/cpu/common-xen.c linux-2.6.18-xen/arch/i386/kernel/cpu/common-xen.c
---- linux-2.6.18.3/arch/i386/kernel/cpu/common-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/cpu/common-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,739 @@
+ obj-$(CONFIG_MTRR)	+= 	mtrr/
+ obj-$(CONFIG_CPU_FREQ)	+=	cpufreq/
++
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++obj-y := $(call cherrypickxen, $(obj-y), $(src))
++endif
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/cpu/mtrr/main-xen.c b/arch/i386/kernel/cpu/mtrr/main-xen.c
+--- a/arch/i386/kernel/cpu/mtrr/main-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/cpu/mtrr/main-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,197 @@
 +#include <linux/init.h>
-+#include <linux/string.h>
-+#include <linux/delay.h>
-+#include <linux/smp.h>
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
 +#include <linux/module.h>
-+#include <linux/percpu.h>
-+#include <linux/bootmem.h>
-+#include <asm/semaphore.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/msr.h>
-+#include <asm/io.h>
-+#include <asm/mmu_context.h>
-+#include <asm/mtrr.h>
-+#include <asm/mce.h>
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#include <asm/mpspec.h>
-+#include <asm/apic.h>
-+#include <mach_apic.h>
-+#endif
-+#include <asm/hypervisor.h>
-+
-+#include "cpu.h"
-+
-+DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
-+EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
++#include <linux/seq_file.h>
++#include <linux/mutex.h>
++#include <asm/uaccess.h>
 +
-+#ifndef CONFIG_XEN
-+DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-+EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
-+#endif
++#include <asm/mtrr.h>
++#include "mtrr.h"
 +
-+static int cachesize_override __cpuinitdata = -1;
-+static int disable_x86_fxsr __cpuinitdata;
-+static int disable_x86_serial_nr __cpuinitdata = 1;
-+static int disable_x86_sep __cpuinitdata;
++static DEFINE_MUTEX(mtrr_mutex);
 +
-+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
++static void generic_get_mtrr(unsigned int reg, unsigned long *base,
++			     unsigned long *size, mtrr_type * type)
++{
++	dom0_op_t op;
 +
-+extern int disable_pse;
++	op.cmd = DOM0_READ_MEMTYPE;
++	op.u.read_memtype.reg = reg;
++	(void)HYPERVISOR_dom0_op(&op);
 +
-+static void default_init(struct cpuinfo_x86 * c)
-+{
-+	/* Not much we can do here... */
-+	/* Check if at least it has cpuid */
-+	if (c->cpuid_level == -1) {
-+		/* No cpuid. It must be an ancient CPU */
-+		if (c->x86 == 4)
-+			strcpy(c->x86_model_id, "486");
-+		else if (c->x86 == 3)
-+			strcpy(c->x86_model_id, "386");
-+	}
++	*size = op.u.read_memtype.nr_mfns;
++	*base = op.u.read_memtype.mfn;
++	*type = op.u.read_memtype.type;
 +}
 +
-+static struct cpu_dev default_cpu = {
-+	.c_init	= default_init,
-+	.c_vendor = "Unknown",
++struct mtrr_ops generic_mtrr_ops = {
++	.use_intel_if      = 1,
++	.get               = generic_get_mtrr,
 +};
-+static struct cpu_dev * this_cpu = &default_cpu;
 +
-+static int __init cachesize_setup(char *str)
-+{
-+	get_option (&str, &cachesize_override);
-+	return 1;
-+}
-+__setup("cachesize=", cachesize_setup);
++struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
++unsigned int num_var_ranges;
++unsigned int *usage_table;
 +
-+int __cpuinit get_model_name(struct cpuinfo_x86 *c)
++/*  This function returns the number of variable MTRRs  */
++static void __init set_num_var_ranges(void)
 +{
-+	unsigned int *v;
-+	char *p, *q;
-+
-+	if (cpuid_eax(0x80000000) < 0x80000004)
-+		return 0;
-+
-+	v = (unsigned int *) c->x86_model_id;
-+	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
-+	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
-+	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
-+	c->x86_model_id[48] = 0;
++	dom0_op_t op;
 +
-+	/* Intel chips right-justify this string for some dumb reason;
-+	   undo that brain damage */
-+	p = q = &c->x86_model_id[0];
-+	while ( *p == ' ' )
-+	     p++;
-+	if ( p != q ) {
-+	     while ( *p )
-+		  *q++ = *p++;
-+	     while ( q <= &c->x86_model_id[48] )
-+		  *q++ = '\0';	/* Zero-pad the rest */
++	for (num_var_ranges = 0; ; num_var_ranges++) {
++		op.cmd = DOM0_READ_MEMTYPE;
++		op.u.read_memtype.reg = num_var_ranges;
++		if (HYPERVISOR_dom0_op(&op) != 0)
++			break;
 +	}
-+
-+	return 1;
 +}
 +
-+
-+void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
++static void __init init_table(void)
 +{
-+	unsigned int n, dummy, ecx, edx, l2size;
-+
-+	n = cpuid_eax(0x80000000);
++	int i, max;
 +
-+	if (n >= 0x80000005) {
-+		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
-+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-+			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-+		c->x86_cache_size=(ecx>>24)+(edx>>24);	
++	max = num_var_ranges;
++	if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
++	    == NULL) {
++		printk(KERN_ERR "mtrr: could not allocate\n");
++		return;
 +	}
++	for (i = 0; i < max; i++)
++		usage_table[i] = 0;
++}
 +
-+	if (n < 0x80000006)	/* Some chips just has a large L1. */
-+		return;
++int mtrr_add_page(unsigned long base, unsigned long size, 
++		  unsigned int type, char increment)
++{
++	int error;
++	dom0_op_t op;
 +
-+	ecx = cpuid_ecx(0x80000006);
-+	l2size = ecx >> 16;
-+	
-+	/* do processor-specific cache resizing */
-+	if (this_cpu->c_size_cache)
-+		l2size = this_cpu->c_size_cache(c,l2size);
++	mutex_lock(&mtrr_mutex);
 +
-+	/* Allow user to override all this if necessary. */
-+	if (cachesize_override != -1)
-+		l2size = cachesize_override;
++	op.cmd = DOM0_ADD_MEMTYPE;
++	op.u.add_memtype.mfn     = base;
++	op.u.add_memtype.nr_mfns = size;
++	op.u.add_memtype.type    = type;
++	error = HYPERVISOR_dom0_op(&op);
++	if (error) {
++		mutex_unlock(&mtrr_mutex);
++		BUG_ON(error > 0);
++		return error;
++	}
 +
-+	if ( l2size == 0 )
-+		return;		/* Again, no L2 cache is possible */
++	if (increment)
++		++usage_table[op.u.add_memtype.reg];
 +
-+	c->x86_cache_size = l2size;
++	mutex_unlock(&mtrr_mutex);
 +
-+	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-+	       l2size, ecx & 0xFF);
++	return op.u.add_memtype.reg;
 +}
 +
-+/* Naming convention should be: <Name> [(<Codename>)] */
-+/* This table only is used unless init_<vendor>() below doesn't set it; */
-+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
-+
-+/* Look up CPU names by table lookup. */
-+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
++static int mtrr_check(unsigned long base, unsigned long size)
 +{
-+	struct cpu_model_info *info;
-+
-+	if ( c->x86_model >= 16 )
-+		return NULL;	/* Range check */
-+
-+	if (!this_cpu)
-+		return NULL;
-+
-+	info = this_cpu->c_models;
-+
-+	while (info && info->family) {
-+		if (info->family == c->x86)
-+			return info->model_names[c->x86_model];
-+		info++;
++	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
++		printk(KERN_WARNING
++			"mtrr: size and base must be multiples of 4 kiB\n");
++		printk(KERN_DEBUG
++			"mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
++		dump_stack();
++		return -1;
 +	}
-+	return NULL;		/* Not found */
++	return 0;
 +}
 +
++int
++mtrr_add(unsigned long base, unsigned long size, unsigned int type,
++	 char increment)
++{
++	if (mtrr_check(base, size))
++		return -EINVAL;
++	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
++			     increment);
++}
 +
-+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
++int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 +{
-+	char *v = c->x86_vendor_id;
-+	int i;
-+	static int printed;
++	unsigned i;
++	mtrr_type ltype;
++	unsigned long lbase, lsize;
++	int error = -EINVAL;
++	dom0_op_t op;
 +
-+	for (i = 0; i < X86_VENDOR_NUM; i++) {
-+		if (cpu_devs[i]) {
-+			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
-+			    (cpu_devs[i]->c_ident[1] && 
-+			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
-+				c->x86_vendor = i;
-+				if (!early)
-+					this_cpu = cpu_devs[i];
-+				return;
++	mutex_lock(&mtrr_mutex);
++
++	if (reg < 0) {
++		/*  Search for existing MTRR  */
++		for (i = 0; i < num_var_ranges; ++i) {
++			mtrr_if->get(i, &lbase, &lsize, &ltype);
++			if (lbase == base && lsize == size) {
++				reg = i;
++				break;
 +			}
 +		}
++		if (reg < 0) {
++			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
++			       size);
++			goto out;
++		}
 +	}
-+	if (!printed) {
-+		printed++;
-+		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
-+		printk(KERN_ERR "CPU: Your system may be unstable.\n");
++	if (usage_table[reg] < 1) {
++		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
++		goto out;
 +	}
-+	c->x86_vendor = X86_VENDOR_UNKNOWN;
-+	this_cpu = &default_cpu;
++	if (--usage_table[reg] < 1) {
++		op.cmd = DOM0_DEL_MEMTYPE;
++		op.u.del_memtype.handle = 0;
++		op.u.del_memtype.reg    = reg;
++		error = HYPERVISOR_dom0_op(&op);
++		if (error) {
++			BUG_ON(error > 0);
++			goto out;
++		}
++	}
++	error = reg;
++ out:
++	mutex_unlock(&mtrr_mutex);
++	return error;
 +}
 +
-+
-+static int __init x86_fxsr_setup(char * s)
++int
++mtrr_del(int reg, unsigned long base, unsigned long size)
 +{
-+	disable_x86_fxsr = 1;
-+	return 1;
-+}
-+__setup("nofxsr", x86_fxsr_setup);
-+
-+
-+static int __init x86_sep_setup(char * s)
-+{
-+	disable_x86_sep = 1;
-+	return 1;
++	if (mtrr_check(base, size))
++		return -EINVAL;
++	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
 +}
-+__setup("nosep", x86_sep_setup);
 +
++EXPORT_SYMBOL(mtrr_add);
++EXPORT_SYMBOL(mtrr_del);
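The exported pair keeps the usual driver-facing MTRR contract; a typical (hypothetical) use is mapping write-combining over a framebuffer, which under Xen now funnels into the dom0 hypercalls above instead of touching MSRs directly:

/* Hypothetical driver fragment, sketch only. */
int reg = mtrr_add(0xd0000000UL, 0x1000000UL, MTRR_TYPE_WRCOMB, 1);
if (reg < 0)
	printk(KERN_WARNING "mydrv: write-combining unavailable\n");
/* ... on teardown ... */
if (reg >= 0)
	mtrr_del(reg, 0xd0000000UL, 0x1000000UL);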
 +
-+/* Standard macro to see if a specific flag is changeable */
-+static inline int flag_is_changeable_p(u32 flag)
++void __init mtrr_bp_init(void)
 +{
-+	u32 f1, f2;
-+
-+	asm("pushfl\n\t"
-+	    "pushfl\n\t"
-+	    "popl %0\n\t"
-+	    "movl %0,%1\n\t"
-+	    "xorl %2,%0\n\t"
-+	    "pushl %0\n\t"
-+	    "popfl\n\t"
-+	    "pushfl\n\t"
-+	    "popl %0\n\t"
-+	    "popfl\n\t"
-+	    : "=&r" (f1), "=&r" (f2)
-+	    : "ir" (flag));
-+
-+	return ((f1^f2) & flag) != 0;
 +}
 +
-+
-+/* Probe for the CPUID instruction */
-+static int __cpuinit have_cpuid_p(void)
++void mtrr_ap_init(void)
 +{
-+	return flag_is_changeable_p(X86_EFLAGS_ID);
 +}
 +
-+/* Do minimum CPU detection early.
-+   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-+   The others are not touched to avoid unwanted side effects.
-+
-+   WARNING: this function is only called on the BP.  Don't add code here
-+   that is supposed to run on all CPUs. */
-+static void __init early_cpu_detect(void)
++static int __init mtrr_init(void)
 +{
 +	struct cpuinfo_x86 *c = &boot_cpu_data;
 +
-+	c->x86_cache_alignment = 32;
-+
-+	if (!have_cpuid_p())
-+		return;
++	if (!is_initial_xendomain())
++		return -ENODEV;
 +
-+	/* Get vendor name */
-+	cpuid(0x00000000, &c->cpuid_level,
-+	      (int *)&c->x86_vendor_id[0],
-+	      (int *)&c->x86_vendor_id[8],
-+	      (int *)&c->x86_vendor_id[4]);
++	if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
++	    (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
++	    (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
++	    (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
++		return -ENODEV;
 +
-+	get_cpu_vendor(c, 1);
++	set_num_var_ranges();
++	init_table();
 +
-+	c->x86 = 4;
-+	if (c->cpuid_level >= 0x00000001) {
-+		u32 junk, tfms, cap0, misc;
-+		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-+		c->x86 = (tfms >> 8) & 15;
-+		c->x86_model = (tfms >> 4) & 15;
-+		if (c->x86 == 0xf)
-+			c->x86 += (tfms >> 20) & 0xff;
-+		if (c->x86 >= 0x6)
-+			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+		c->x86_mask = tfms & 15;
-+		if (cap0 & (1<<19))
-+			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
-+	}
++	return 0;
 +}
 +
-+void __cpuinit generic_identify(struct cpuinfo_x86 * c)
-+{
-+	u32 tfms, xlvl;
-+	int ebx;
++subsys_initcall(mtrr_init);
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/cpu/mtrr/Makefile b/arch/i386/kernel/cpu/mtrr/Makefile
+--- a/arch/i386/kernel/cpu/mtrr/Makefile	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/cpu/mtrr/Makefile	2007-03-14 10:55:14.000000000 +0100
+@@ -1,3 +1,10 @@
+ obj-y		:= main.o if.o generic.o state.o
+ obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
+ 
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++n-obj-xen := generic.o state.o amd.o cyrix.o centaur.o
 +
-+	if (have_cpuid_p()) {
-+		/* Get vendor name */
-+		cpuid(0x00000000, &c->cpuid_level,
-+		      (int *)&c->x86_vendor_id[0],
-+		      (int *)&c->x86_vendor_id[8],
-+		      (int *)&c->x86_vendor_id[4]);
-+		
-+		get_cpu_vendor(c, 0);
-+		/* Initialize the standard set of capabilities */
-+		/* Note that the vendor-specific code below might override */
-+	
-+		/* Intel-defined flags: level 0x00000001 */
-+		if ( c->cpuid_level >= 0x00000001 ) {
-+			u32 capability, excap;
-+			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-+			c->x86_capability[0] = capability;
-+			c->x86_capability[4] = excap;
-+			c->x86 = (tfms >> 8) & 15;
-+			c->x86_model = (tfms >> 4) & 15;
-+			if (c->x86 == 0xf)
-+				c->x86 += (tfms >> 20) & 0xff;
-+			if (c->x86 >= 0x6)
-+				c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+			c->x86_mask = tfms & 15;
-+#ifdef CONFIG_X86_HT
-+			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
-+#else
-+			c->apicid = (ebx >> 24) & 0xFF;
++obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
++obj-y := $(call cherrypickxen, $(obj-y))
++endif
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/e820.c b/arch/i386/kernel/e820.c
+--- a/arch/i386/kernel/e820.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/e820.c	2007-03-14 10:55:14.000000000 +0100
+@@ -15,11 +15,25 @@
+ #include <asm/page.h>
+ #include <asm/e820.h>
+ 
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/memory.h>
++#include <xen/features.h>
++#include <xen/xencons.h>
 +#endif
-+		} else {
-+			/* Have CPUID level 0 only - unheard of */
-+			c->x86 = 4;
-+		}
-+
-+		/* AMD-defined flags: level 0x80000001 */
-+		xlvl = cpuid_eax(0x80000000);
-+		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
-+			if ( xlvl >= 0x80000001 ) {
-+				c->x86_capability[1] = cpuid_edx(0x80000001);
-+				c->x86_capability[6] = cpuid_ecx(0x80000001);
-+			}
-+			if ( xlvl >= 0x80000004 )
-+				get_model_name(c); /* Default name */
-+		}
-+	}
-+
-+	early_intel_workaround(c);
 +
-+#ifdef CONFIG_X86_HT
-+	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
+ #ifdef CONFIG_EFI
+ int efi_enabled = 0;
+ EXPORT_SYMBOL(efi_enabled);
+ #endif
+ 
++#ifdef CONFIG_XEN
++struct e820map machine_e820;
 +#endif
-+}
++static void __init
++e820_setup_gap(struct e820entry *e820, int nr_map);
 +
-+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-+{
-+	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
-+		/* Disable processor serial number */
-+		unsigned long lo,hi;
-+		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-+		lo |= 0x200000;
-+		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-+		printk(KERN_NOTICE "CPU serial number disabled.\n");
-+		clear_bit(X86_FEATURE_PN, c->x86_capability);
+ struct e820map e820;
+ struct change_member {
+ 	struct e820entry *pbios; /* pointer to original bios entry */
+@@ -180,6 +194,12 @@
+ 	unsigned char *rom;
+ 	int	      i;
+ 
++#ifdef CONFIG_XEN
++	/* Nothing to do if not running in dom0. */
++	if (!is_initial_xendomain())
++		return;
++#endif
 +
-+		/* Disabling the serial number may affect the cpuid level */
-+		c->cpuid_level = cpuid_eax(0);
-+	}
-+}
+ 	/* video rom */
+ 	upper = adapter_rom_resources[0].start;
+ 	for (start = video_rom_resource.start; start < upper; start += 2048) {
+@@ -247,36 +267,54 @@
+ legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
+ {
+ 	int i;
++	struct e820entry *map = e820.map;
++	int nr_map = e820.nr_map;
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++	struct xen_memory_map memmap;
 +
-+static int __init x86_serial_nr_setup(char *s)
-+{
-+	disable_x86_serial_nr = 0;
-+	return 1;
-+}
-+__setup("serialnumber", x86_serial_nr_setup);
++	map = machine_e820.map;
++	memmap.nr_entries = E820MAX;
 +
++	set_xen_guest_handle(memmap.buffer, map);
 +
++	if(HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++		BUG();
++	machine_e820.nr_map = memmap.nr_entries;
++	nr_map = memmap.nr_entries;
++	e820_setup_gap(map, memmap.nr_entries);
++#endif
+ 
+ 	probe_roms();
+-	for (i = 0; i < e820.nr_map; i++) {
++	for (i = 0; i < nr_map; i++) {
+ 		struct resource *res;
+ #ifndef CONFIG_RESOURCES_64BIT
+-		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
++		if (map[i].addr + map[i].size > 0x100000000ULL)
+ 			continue;
+ #endif
+ 		res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
+-		switch (e820.map[i].type) {
++		switch (map[i].type) {
+ 		case E820_RAM:	res->name = "System RAM"; break;
+ 		case E820_ACPI:	res->name = "ACPI Tables"; break;
+ 		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
+ 		default:	res->name = "reserved";
+ 		}
+-		res->start = e820.map[i].addr;
+-		res->end = res->start + e820.map[i].size - 1;
++		res->start = map[i].addr;
++		res->end = res->start + map[i].size - 1;
+ 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ 		if (request_resource(&iomem_resource, res)) {
+ 			kfree(res);
+ 			continue;
+ 		}
+-		if (e820.map[i].type == E820_RAM) {
++		if (map[i].type == E820_RAM) {
+ 			/*
+ 			 *  We don't know which RAM region contains kernel data,
+ 			 *  so we try it repeatedly and let the resource manager
+ 			 *  test it.
+ 			 */
++#ifndef CONFIG_XEN
+ 			request_resource(res, code_resource);
+ 			request_resource(res, data_resource);
++#endif
+ #ifdef CONFIG_KEXEC
+ 			request_resource(res, &crashk_res);
+ #endif
+@@ -295,6 +333,11 @@
+ 	int i;
+ 
+ 	printk("Setting up standard PCI resources\n");
++#ifdef CONFIG_XEN
++	/* Nothing to do if not running in dom0. */
++	if (!is_initial_xendomain())
++		return 0;
++#endif
+ 	if (efi_enabled)
+ 		efi_initialize_iomem_resources(&code_resource, &data_resource);
+ 	else
+@@ -514,10 +557,13 @@
+  */
+ int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+ {
++#ifndef CONFIG_XEN
+ 	/* Only one memory region (or negative)? Ignore it */
+ 	if (nr_map < 2)
+ 		return -1;
+-
++#else
++	BUG_ON(nr_map < 1);
++#endif
+ 	do {
+ 		unsigned long long start = biosmap->addr;
+ 		unsigned long long size = biosmap->size;
+@@ -529,6 +575,7 @@
+ 		if (start > end)
+ 			return -1;
+ 
++#ifndef CONFIG_XEN
+ 		/*
+ 		 * Some BIOSes claim RAM in the 640k - 1M region.
+ 		 * Not right. Fix it up.
+@@ -549,6 +596,7 @@
+ 				size = end - start;
+ 			}
+ 		}
++#endif
+ 		add_memory_region(start, size, type);
+ 	} while (biosmap++,--nr_map);
+ 	return 0;
+@@ -653,6 +701,15 @@
+ 		 */
+ 		last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+ 
++#ifdef CONFIG_XEN
++		/*
++		 * Truncate to the number of actual pages currently
++		 * present.
++		 */
++		if (last_pfn > xen_start_info->nr_pages)
++			last_pfn = xen_start_info->nr_pages;
++#endif
 +
+ 		if (last_pfn > max_low_pfn)
+ 			last_pfn = max_low_pfn;
+ 
+@@ -668,7 +725,12 @@
+ 	}
+ }
+ 
+-void __init e820_register_memory(void)
 +/*
-+ * This does the hard work of actually picking apart the CPU stuff...
++ * Locate a unused range of the physical address space below 4G which
++ * can be used for PCI mappings.
 + */
-+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++static void __init
++e820_setup_gap(struct e820entry *e820, int nr_map)
+ {
+ 	unsigned long gapstart, gapsize, round;
+ 	unsigned long long last;
+@@ -681,10 +743,10 @@
+ 	last = 0x100000000ull;
+ 	gapstart = 0x10000000;
+ 	gapsize = 0x400000;
+-	i = e820.nr_map;
++	i = nr_map;
+ 	while (--i >= 0) {
+-		unsigned long long start = e820.map[i].addr;
+-		unsigned long long end = start + e820.map[i].size;
++		unsigned long long start = e820[i].addr;
++		unsigned long long end = start + e820[i].size;
+ 
+ 		/*
+ 		 * Since "last" is at most 4GB, we know we'll
+@@ -716,6 +778,13 @@
+ 		pci_mem_start, gapstart, gapsize);
+ }
+ 
++void __init e820_register_memory(void)
 +{
-+	int i;
-+
-+	c->loops_per_jiffy = loops_per_jiffy;
-+	c->x86_cache_size = -1;
-+	c->x86_vendor = X86_VENDOR_UNKNOWN;
-+	c->cpuid_level = -1;	/* CPUID not detected */
-+	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
-+	c->x86_vendor_id[0] = '\0'; /* Unset */
-+	c->x86_model_id[0] = '\0';  /* Unset */
-+	c->x86_max_cores = 1;
-+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
++#ifndef CONFIG_XEN
++	e820_setup_gap(e820.map, e820.nr_map);
++#endif
++}
 +
-+	if (!have_cpuid_p()) {
-+		/* First of all, decide if this is a 486 or higher */
-+		/* It's a 486 if we can modify the AC flag */
-+		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
-+			c->x86 = 4;
+ void __init print_memory_map(char *who)
+ {
+ 	int i;
+@@ -784,7 +853,7 @@
+ 
+ void __init limit_regions(unsigned long long size)
+ {
+-	unsigned long long current_addr;
++	unsigned long long current_addr = 0;
+ 	int i;
+ 
+ 	print_memory_map("limit_regions start");
+@@ -813,6 +882,19 @@
+ 		print_memory_map("limit_regions endfor");
+ 		return;
+ 	}
++#ifdef CONFIG_XEN
++	if (i==e820.nr_map && current_addr < size) {
++		/*
++		 * The e820 map finished before our requested size so
++		 * extend the final entry to the requested address.
++		 */
++		--i;
++		if (e820.map[i].type == E820_RAM)
++			e820.map[i].size -= current_addr - size;
 +		else
-+			c->x86 = 3;
++			add_memory_region(current_addr, size - current_addr, E820_RAM);
 +	}
++#endif
+ 	print_memory_map("limit_regions endfunc");
+ }
+ 
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
+--- a/arch/i386/kernel/entry.S	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/entry.S	2007-03-14 10:55:14.000000000 +0100
+@@ -284,7 +284,7 @@
+ 	CFI_SIGNAL_FRAME
+ 	CFI_DEF_CFA esp, 0
+ 	CFI_REGISTER esp, ebp
+-	movl TSS_sysenter_esp0(%esp),%esp
++	movl SYSENTER_stack_esp0(%esp),%esp
+ sysenter_past_esp:
+ 	/*
+ 	 * No need to follow this irqs on/off section: the syscall
+@@ -727,7 +727,7 @@
+  * that sets up the real kernel stack. Check here, since we can't
+  * allow the wrong stack to be used.
+  *
+- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
++ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
+  * already pushed 3 words if it hits on the sysenter instruction:
+  * eflags, cs and eip.
+  *
+@@ -739,7 +739,7 @@
+ 	cmpw $__KERNEL_CS,4(%esp);		\
+ 	jne ok;					\
+ label:						\
+-	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
++	movl SYSENTER_stack_esp0+offset(%esp),%esp;	\
+ 	CFI_DEF_CFA esp, 0;			\
+ 	CFI_UNDEFINED eip;			\
+ 	pushfl;					\
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/entry-xen.S b/arch/i386/kernel/entry-xen.S
+--- a/arch/i386/kernel/entry-xen.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/entry-xen.S	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,1254 @@
++/*
++ *  linux/arch/i386/entry.S
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ */
 +
-+	generic_identify(c);
-+
-+	printk(KERN_DEBUG "CPU: After generic identify, caps:");
-+	for (i = 0; i < NCAPINTS; i++)
-+		printk(" %08lx", c->x86_capability[i]);
-+	printk("\n");
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ * This also contains the timer-interrupt handler, as well as all interrupts
++ * and faults that can result in a task-switch.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after a timer-interrupt and after each system call.
++ *
++ * I changed all the .align's to 4 (16 byte alignment), as that's faster
++ * on a 486.
++ *
++ * Stack layout in 'ret_from_system_call':
++ * 	ptrace needs to have all regs on the stack.
++ *	if the order here is changed, it needs to be
++ *	updated in fork.c:copy_process, signal.c:do_signal,
++ *	ptrace.c and ptrace.h
++ *
++ *	 0(%esp) - %ebx
++ *	 4(%esp) - %ecx
++ *	 8(%esp) - %edx
++ *       C(%esp) - %esi
++ *	10(%esp) - %edi
++ *	14(%esp) - %ebp
++ *	18(%esp) - %eax
++ *	1C(%esp) - %ds
++ *	20(%esp) - %es
++ *	24(%esp) - %gs
++ *	28(%esp) - orig_eax
++ *	2C(%esp) - %eip
++ *	30(%esp) - %cs
++ *	34(%esp) - %eflags
++ *	38(%esp) - %oldesp
++ *	3C(%esp) - %oldss
++ *
++ * "current" is in register %ebx during any slow entries.
++ */
 +
-+	if (this_cpu->c_identify) {
-+		this_cpu->c_identify(c);
++#include <linux/linkage.h>
++#include <asm/thread_info.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/page.h>
++#include <asm/desc.h>
++#include <asm/percpu.h>
++#include <asm/dwarf2.h>
++#include "irq_vectors.h"
++#include <xen/interface/xen.h>
 +
-+		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
-+		for (i = 0; i < NCAPINTS; i++)
-+			printk(" %08lx", c->x86_capability[i]);
-+		printk("\n");
-+	}
++/*
++ * We use macros for low-level operations which need to be overridden
++ * for paravirtualization.  The following will never clobber any registers:
++ *   INTERRUPT_RETURN (aka. "iret")
++ *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
++ *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
++ *
++ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
++ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
++ * Allowing a register to be clobbered can shrink the paravirt replacement
++ * enough to patch inline, increasing performance.
++ */
 +
-+	/*
-+	 * Vendor-specific initialization.  In this section we
-+	 * canonicalize the feature flags, meaning if there are
-+	 * features a certain CPU supports which CPUID doesn't
-+	 * tell us, CPUID claiming incorrect flags, or other bugs,
-+	 * we handle them here.
-+	 *
-+	 * At the end of this section, c->x86_capability better
-+	 * indicate the features this CPU genuinely supports!
-+	 */
-+	if (this_cpu->c_init)
-+		this_cpu->c_init(c);
++#define nr_syscalls ((syscall_table_size)/4)
 +
-+	/* Disable the PN if appropriate */
-+	squash_the_stupid_serial_number(c);
++CF_MASK		= 0x00000001
++TF_MASK		= 0x00000100
++IF_MASK		= 0x00000200
++DF_MASK		= 0x00000400 
++NT_MASK		= 0x00004000
++VM_MASK		= 0x00020000
++/* Pseudo-eflags. */
++NMI_MASK	= 0x80000000
 +
-+	/*
-+	 * The vendor-specific functions might have changed features.  Now
-+	 * we do "generic changes."
-+	 */
++#ifdef CONFIG_XEN
++/* Offsets into shared_info_t. */
++#define evtchn_upcall_pending		/* 0 */
++#define evtchn_upcall_mask		1
 +
-+	/* TSC disabled? */
-+	if ( tsc_disable )
-+		clear_bit(X86_FEATURE_TSC, c->x86_capability);
++#define sizeof_vcpu_shift		6
 +
-+	/* FXSR disabled? */
-+	if (disable_x86_fxsr) {
-+		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
-+		clear_bit(X86_FEATURE_XMM, c->x86_capability);
-+	}
++#ifdef CONFIG_SMP
++#define GET_VCPU_INFO		movl %gs:PDA_cpu,%esi			; \
++				shl  $sizeof_vcpu_shift,%esi		; \
++				addl HYPERVISOR_shared_info,%esi
++#else
++#define GET_VCPU_INFO		movl HYPERVISOR_shared_info,%esi
++#endif
 +
-+	/* SEP disabled? */
-+	if (disable_x86_sep)
-+		clear_bit(X86_FEATURE_SEP, c->x86_capability);
++#define __DISABLE_INTERRUPTS	movb $1,evtchn_upcall_mask(%esi)
++#define __ENABLE_INTERRUPTS	movb $0,evtchn_upcall_mask(%esi)
++#define __TEST_PENDING		testb $0xFF,evtchn_upcall_pending(%esi)
++#endif
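The three macros above are the Xen replacement for cli/sti: interrupts are virtualized as event channels, and masking is just a byte store into the shared vcpu_info structure. In C terms the semantics are roughly the following sketch, using the standard Xen shared_info layout (force_evtchn_callback() is the helper the Xen port uses to deliver a missed upcall):

/* Rough C equivalent of the macros above; sketch only. */
static inline void xen_irq_disable(struct vcpu_info *v)
{
	v->evtchn_upcall_mask = 1;        /* __DISABLE_INTERRUPTS */
}

static inline void xen_irq_enable(struct vcpu_info *v)
{
	v->evtchn_upcall_mask = 0;        /* __ENABLE_INTERRUPTS */
	/* After unmasking, a pending upcall must still be delivered: */
	if (v->evtchn_upcall_pending)     /* __TEST_PENDING */
		force_evtchn_callback();
}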
 +
-+	if (disable_pse)
-+		clear_bit(X86_FEATURE_PSE, c->x86_capability);
++#ifdef CONFIG_PREEMPT
++#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
++#else
++#define preempt_stop(clobbers)
++#define resume_kernel		restore_nocheck
++#endif
 +
-+	/* If the model name is still unset, do table lookup. */
-+	if ( !c->x86_model_id[0] ) {
-+		char *p;
-+		p = table_lookup_model(c);
-+		if ( p )
-+			strcpy(c->x86_model_id, p);
-+		else
-+			/* Last resort... */
-+			sprintf(c->x86_model_id, "%02x/%02x",
-+				c->x86, c->x86_model);
-+	}
++.macro TRACE_IRQS_IRET
++#ifdef CONFIG_TRACE_IRQFLAGS
++	testl $IF_MASK,PT_EFLAGS(%esp)     # interrupts off?
++	jz 1f
++	TRACE_IRQS_ON
++1:
++#endif
++.endm
 +
-+	/* Now the feature flags better reflect actual CPU features! */
++#ifdef CONFIG_VM86
++#define resume_userspace_sig	check_userspace
++#else
++#define resume_userspace_sig	resume_userspace
++#endif
 +
-+	printk(KERN_DEBUG "CPU: After all inits, caps:");
-+	for (i = 0; i < NCAPINTS; i++)
-+		printk(" %08lx", c->x86_capability[i]);
-+	printk("\n");
++#define SAVE_ALL \
++	cld; \
++	pushl %gs; \
++	CFI_ADJUST_CFA_OFFSET 4;\
++	/*CFI_REL_OFFSET gs, 0;*/\
++	pushl %es; \
++	CFI_ADJUST_CFA_OFFSET 4;\
++	/*CFI_REL_OFFSET es, 0;*/\
++	pushl %ds; \
++	CFI_ADJUST_CFA_OFFSET 4;\
++	/*CFI_REL_OFFSET ds, 0;*/\
++	pushl %eax; \
++	CFI_ADJUST_CFA_OFFSET 4;\
++	CFI_REL_OFFSET eax, 0;\
++	pushl %ebp; \
++	CFI_ADJUST_CFA_OFFSET 4;\
++	CFI_REL_OFFSET ebp, 0;\
++	pushl %edi; \
++	CFI_ADJUST_CFA_OFFSET 4;\
++	CFI_REL_OFFSET edi, 0;\
++	pushl %esi; \
++	CFI_ADJUST_CFA_OFFSET 4;\
++	CFI_REL_OFFSET esi, 0;\
++	pushl %edx; \
++	CFI_ADJUST_CFA_OFFSET 4;\
++	CFI_REL_OFFSET edx, 0;\
++	pushl %ecx; \
++	CFI_ADJUST_CFA_OFFSET 4;\
++	CFI_REL_OFFSET ecx, 0;\
++	pushl %ebx; \
++	CFI_ADJUST_CFA_OFFSET 4;\
++	CFI_REL_OFFSET ebx, 0;\
++	movl $(__USER_DS), %edx; \
++	movl %edx, %ds; \
++	movl %edx, %es; \
++	movl $(__KERNEL_PDA), %edx; \
++	movl %edx, %gs
 +
-+	/*
-+	 * On SMP, boot_cpu_data holds the common feature set between
-+	 * all CPUs; so make sure that we indicate which features are
-+	 * common between the CPUs.  The first time this routine gets
-+	 * executed, c == &boot_cpu_data.
-+	 */
-+	if ( c != &boot_cpu_data ) {
-+		/* AND the already accumulated flags with these */
-+		for ( i = 0 ; i < NCAPINTS ; i++ )
-+			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
-+	}
-+
-+	/* Init Machine Check Exception if available. */
-+	mcheck_init(c);
-+
-+	if (c == &boot_cpu_data)
-+		sysenter_setup();
-+	enable_sep_cpu();
-+
-+	if (c == &boot_cpu_data)
-+		mtrr_bp_init();
-+	else
-+		mtrr_ap_init();
-+}
-+
-+#ifdef CONFIG_X86_HT
-+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-+{
-+	u32 	eax, ebx, ecx, edx;
-+	int 	index_msb, core_bits;
-+
-+	cpuid(1, &eax, &ebx, &ecx, &edx);
-+
-+	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-+		return;
-+
-+	smp_num_siblings = (ebx & 0xff0000) >> 16;
++#define RESTORE_INT_REGS \
++	popl %ebx;	\
++	CFI_ADJUST_CFA_OFFSET -4;\
++	CFI_RESTORE ebx;\
++	popl %ecx;	\
++	CFI_ADJUST_CFA_OFFSET -4;\
++	CFI_RESTORE ecx;\
++	popl %edx;	\
++	CFI_ADJUST_CFA_OFFSET -4;\
++	CFI_RESTORE edx;\
++	popl %esi;	\
++	CFI_ADJUST_CFA_OFFSET -4;\
++	CFI_RESTORE esi;\
++	popl %edi;	\
++	CFI_ADJUST_CFA_OFFSET -4;\
++	CFI_RESTORE edi;\
++	popl %ebp;	\
++	CFI_ADJUST_CFA_OFFSET -4;\
++	CFI_RESTORE ebp;\
++	popl %eax;	\
++	CFI_ADJUST_CFA_OFFSET -4;\
++	CFI_RESTORE eax
 +
-+	if (smp_num_siblings == 1) {
-+		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-+	} else if (smp_num_siblings > 1 ) {
++#define RESTORE_REGS	\
++	RESTORE_INT_REGS; \
++1:	popl %ds;	\
++	CFI_ADJUST_CFA_OFFSET -4;\
++	/*CFI_RESTORE ds;*/\
++2:	popl %es;	\
++	CFI_ADJUST_CFA_OFFSET -4;\
++	/*CFI_RESTORE es;*/\
++3:	popl %gs;	\
++	CFI_ADJUST_CFA_OFFSET -4;\
++	/*CFI_RESTORE gs;*/\
++.pushsection .fixup,"ax";	\
++4:	movl $0,(%esp);	\
++	jmp 1b;		\
++5:	movl $0,(%esp);	\
++	jmp 2b;		\
++6:	movl $0,(%esp);	\
++	jmp 3b;		\
++.section __ex_table,"a";\
++	.align 4;	\
++	.long 1b,4b;	\
++	.long 2b,5b;	\
++	.long 3b,6b;	\
++.popsection
 +
-+		if (smp_num_siblings > NR_CPUS) {
-+			printk(KERN_WARNING "CPU: Unsupported number of the "
-+					"siblings %d", smp_num_siblings);
-+			smp_num_siblings = 1;
-+			return;
-+		}
++#define RING0_INT_FRAME \
++	CFI_STARTPROC simple;\
++	CFI_SIGNAL_FRAME;\
++	CFI_DEF_CFA esp, 3*4;\
++	/*CFI_OFFSET cs, -2*4;*/\
++	CFI_OFFSET eip, -3*4
 +
-+		index_msb = get_count_order(smp_num_siblings);
-+		c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
++#define RING0_EC_FRAME \
++	CFI_STARTPROC simple;\
++	CFI_SIGNAL_FRAME;\
++	CFI_DEF_CFA esp, 4*4;\
++	/*CFI_OFFSET cs, -2*4;*/\
++	CFI_OFFSET eip, -3*4
 +
-+		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
-+		       c->phys_proc_id);
++#define RING0_PTREGS_FRAME \
++	CFI_STARTPROC simple;\
++	CFI_SIGNAL_FRAME;\
++	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
++	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
++	CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
++	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
++	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
++	CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
++	CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
++	CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
++	CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
++	CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
++	CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
++	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
 +
-+		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
++ENTRY(ret_from_fork)
++	CFI_STARTPROC
++	pushl %eax
++	CFI_ADJUST_CFA_OFFSET 4
++	call schedule_tail
++	GET_THREAD_INFO(%ebp)
++	popl %eax
++	CFI_ADJUST_CFA_OFFSET -4
++	pushl $0x0202			# Reset kernel eflags
++	CFI_ADJUST_CFA_OFFSET 4
++	popfl
++	CFI_ADJUST_CFA_OFFSET -4
++	jmp syscall_exit
++	CFI_ENDPROC
 +
-+		index_msb = get_count_order(smp_num_siblings) ;
++/*
++ * Return to user mode is not as complex as all this looks,
++ * but we want the default path for a system call return to
++ * go as quickly as possible which is why some of this is
++ * less clear than it otherwise should be.
++ */
 +
-+		core_bits = get_count_order(c->x86_max_cores);
++	# userspace resumption stub bypassing syscall exit tracing
++	ALIGN
++	RING0_PTREGS_FRAME
++ret_from_exception:
++	preempt_stop(CLBR_ANY)
++ret_from_intr:
++	GET_THREAD_INFO(%ebp)
++check_userspace:
++	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
++	movb PT_CS(%esp), %al
++	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
++	cmpl $USER_RPL, %eax
++	jb resume_kernel		# not returning to v8086 or userspace
 +
-+		c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
-+					       ((1 << core_bits) - 1);
++ENTRY(resume_userspace)
++ 	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
++					# setting need_resched or sigpending
++					# between sampling and the iret
++	movl TI_flags(%ebp), %ecx
++	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
++					# int/exception return?
++	jne work_pending
++	jmp restore_all
 +
-+		if (c->x86_max_cores > 1)
-+			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
-+			       c->cpu_core_id);
-+	}
-+}
++#ifdef CONFIG_PREEMPT
++ENTRY(resume_kernel)
++	DISABLE_INTERRUPTS(CLBR_ANY)
++	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
++	jnz restore_nocheck
++need_resched:
++	movl TI_flags(%ebp), %ecx	# need_resched set ?
++	testb $_TIF_NEED_RESCHED, %cl
++	jz restore_all
++	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
++	jz restore_all
++	call preempt_schedule_irq
++	jmp need_resched
 +#endif
++	CFI_ENDPROC
 +
-+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
-+{
-+	char *vendor = NULL;
-+
-+	if (c->x86_vendor < X86_VENDOR_NUM)
-+		vendor = this_cpu->c_vendor;
-+	else if (c->cpuid_level >= 0)
-+		vendor = c->x86_vendor_id;
-+
-+	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-+		printk("%s ", vendor);
-+
-+	if (!c->x86_model_id[0])
-+		printk("%d86", c->x86);
-+	else
-+		printk("%s", c->x86_model_id);
-+
-+	if (c->x86_mask || c->cpuid_level >= 0) 
-+		printk(" stepping %02x\n", c->x86_mask);
-+	else
-+		printk("\n");
-+}
-+
-+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-+
-+/* This is hacky. :)
-+ * We're emulating future behavior.
-+ * In the future, the cpu-specific init functions will be called implicitly
-+ * via the magic of initcalls.
-+ * They will insert themselves into the cpu_devs structure.
-+ * Then, when cpu_init() is called, we can just iterate over that array.
-+ */
-+
-+extern int intel_cpu_init(void);
-+extern int cyrix_init_cpu(void);
-+extern int nsc_init_cpu(void);
-+extern int amd_init_cpu(void);
-+extern int centaur_init_cpu(void);
-+extern int transmeta_init_cpu(void);
-+extern int rise_init_cpu(void);
-+extern int nexgen_init_cpu(void);
-+extern int umc_init_cpu(void);
-+
-+void __init early_cpu_init(void)
-+{
-+	intel_cpu_init();
-+	cyrix_init_cpu();
-+	nsc_init_cpu();
-+	amd_init_cpu();
-+	centaur_init_cpu();
-+	transmeta_init_cpu();
-+	rise_init_cpu();
-+	nexgen_init_cpu();
-+	umc_init_cpu();
-+	early_cpu_detect();
++/* SYSENTER_RETURN points to after the "sysenter" instruction in
++   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */
 +
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+	/* pse is not compatible with on-the-fly unmapping,
-+	 * disable it even if the cpus claim to support it.
++	# sysenter call handler stub
++ENTRY(sysenter_entry)
++	CFI_STARTPROC simple
++	CFI_SIGNAL_FRAME
++	CFI_DEF_CFA esp, 0
++	CFI_REGISTER esp, ebp
++	movl SYSENTER_stack_esp0(%esp),%esp
++sysenter_past_esp:
++	/*
++	 * No need to follow this irqs on/off section: the syscall
++	 * disabled irqs and here we enable them straight after entry:
 +	 */
-+	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-+	disable_pse = 1;
++	ENABLE_INTERRUPTS(CLBR_NONE)
++	pushl $(__USER_DS)
++	CFI_ADJUST_CFA_OFFSET 4
++	/*CFI_REL_OFFSET ss, 0*/
++	pushl %ebp
++	CFI_ADJUST_CFA_OFFSET 4
++	CFI_REL_OFFSET esp, 0
++	pushfl
++	CFI_ADJUST_CFA_OFFSET 4
++	pushl $(__USER_CS)
++	CFI_ADJUST_CFA_OFFSET 4
++	/*CFI_REL_OFFSET cs, 0*/
++#ifndef CONFIG_COMPAT_VDSO
++	/*
++	 * Push current_thread_info()->sysenter_return to the stack.
++	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
++	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
++	 */
++	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++#else
++	pushl $SYSENTER_RETURN
 +#endif
-+}
-+
-+void __cpuinit cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
-+{
-+	unsigned long frames[16];
-+	unsigned long va;
-+	int f;
-+
-+	for (va = gdt_descr->address, f = 0;
-+	     va < gdt_descr->address + gdt_descr->size;
-+	     va += PAGE_SIZE, f++) {
-+		frames[f] = virt_to_mfn(va);
-+		make_lowmem_page_readonly(
-+			(void *)va, XENFEAT_writable_descriptor_tables);
-+	}
-+	if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
-+		BUG();
-+}
++	CFI_ADJUST_CFA_OFFSET 4
++	CFI_REL_OFFSET eip, 0
 +
 +/*
-+ * cpu_init() initializes state that is per-CPU. Some data is already
-+ * initialized (naturally) in the bootstrap process, such as the GDT
-+ * and IDT. We reload them nevertheless, this function acts as a
-+ * 'CPU state barrier', nothing should get across.
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
 + */
-+void __cpuinit cpu_init(void)
-+{
-+	int cpu = smp_processor_id();
-+#ifndef CONFIG_X86_NO_TSS
-+	struct tss_struct * t = &per_cpu(init_tss, cpu);
-+#endif
-+	struct thread_struct *thread = &current->thread;
-+	struct desc_struct *gdt;
-+	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
-+
-+	if (cpu_test_and_set(cpu, cpu_initialized)) {
-+		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-+		for (;;) local_irq_enable();
-+	}
-+	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
++	cmpl $__PAGE_OFFSET-3,%ebp
++	jae syscall_fault
++1:	movl (%ebp),%ebp
++.section __ex_table,"a"
++	.align 4
++	.long 1b,syscall_fault
++.previous
 +
-+	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
-+		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-+	if (tsc_disable && cpu_has_tsc) {
-+		printk(KERN_NOTICE "Disabling TSC...\n");
-+		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
-+		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
-+		set_in_cr4(X86_CR4_TSD);
-+	}
++	pushl %eax
++	CFI_ADJUST_CFA_OFFSET 4
++	SAVE_ALL
++	GET_THREAD_INFO(%ebp)
 +
-+#ifndef CONFIG_XEN
-+	/* The CPU hotplug case */
-+	if (cpu_gdt_descr->address) {
-+		gdt = (struct desc_struct *)cpu_gdt_descr->address;
-+		memset(gdt, 0, PAGE_SIZE);
-+		goto old_gdt;
-+	}
-+	/*
-+	 * This is a horrible hack to allocate the GDT.  The problem
-+	 * is that cpu_init() is called really early for the boot CPU
-+	 * (and hence needs bootmem) but much later for the secondary
-+	 * CPUs, when bootmem will have gone away
-+	 */
-+	if (NODE_DATA(0)->bdata->node_bootmem_map) {
-+		gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
-+		/* alloc_bootmem_pages panics on failure, so no check */
-+		memset(gdt, 0, PAGE_SIZE);
-+	} else {
-+		gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
-+		if (unlikely(!gdt)) {
-+			printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
-+			for (;;)
-+				local_irq_enable();
-+		}
-+	}
-+old_gdt:
-+	/*
-+	 * Initialize the per-CPU GDT with the boot GDT,
-+	 * and set up the GDT descriptor:
-+	 */
-+ 	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++	jnz syscall_trace_entry
++	cmpl $(nr_syscalls), %eax
++	jae syscall_badsys
++	call *sys_call_table(,%eax,4)
++	movl %eax,PT_EAX(%esp)
++	DISABLE_INTERRUPTS(CLBR_ECX|CLBR_EDX)
++	TRACE_IRQS_OFF
++	movl TI_flags(%ebp), %ecx
++	testw $_TIF_ALLWORK_MASK, %cx
++	jne syscall_exit_work
++/* if something modifies registers it must also disable sysexit */
++	movl PT_EIP(%esp), %edx
++	movl PT_OLDESP(%esp), %ecx
++	xorl %ebp,%ebp
++	TRACE_IRQS_ON
++1:	mov  PT_GS(%esp), %gs
++#ifdef CONFIG_XEN
++	__ENABLE_INTERRUPTS
++sysexit_scrit:	/**** START OF SYSEXIT CRITICAL REGION ****/
++	__TEST_PENDING
++	jnz  14f			# process more events if necessary...
++	movl PT_ESI(%esp), %esi
++	sysexit
++14:	__DISABLE_INTERRUPTS
++sysexit_ecrit:	/**** END OF SYSEXIT CRITICAL REGION ****/
++	push %esp
++	CFI_ADJUST_CFA_OFFSET 4
++	call evtchn_do_upcall
++	add  $4,%esp
++	CFI_ADJUST_CFA_OFFSET -4
++	jmp  ret_from_intr
++#else
++	ENABLE_INTERRUPTS_SYSEXIT
++#endif /* !CONFIG_XEN */
++	CFI_ENDPROC
++.pushsection .fixup,"ax"
++2:	movl $0,PT_GS(%esp)
++	jmp 1b
++.section __ex_table,"a"
++	.align 4
++	.long 1b,2b
++.popsection
 +
-+	/* Set up GDT entry for 16bit stack */
-+ 	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
-+		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
-+		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
-+		(CPU_16BIT_STACK_SIZE - 1);
++	# system call handler stub
++ENTRY(system_call)
++	RING0_INT_FRAME			# can't unwind into user space anyway
++	pushl %eax			# save orig_eax
++	CFI_ADJUST_CFA_OFFSET 4
++	SAVE_ALL
++	GET_THREAD_INFO(%ebp)
++	testl $TF_MASK,PT_EFLAGS(%esp)
++	jz no_singlestep
++	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
++no_singlestep:
++					# system call tracing in operation / emulation
++	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++	jnz syscall_trace_entry
++	cmpl $(nr_syscalls), %eax
++	jae syscall_badsys
++syscall_call:
++	call *sys_call_table(,%eax,4)
++	movl %eax,PT_EAX(%esp)		# store the return value
++syscall_exit:
++	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
++					# setting need_resched or sigpending
++					# between sampling and the iret
++	TRACE_IRQS_OFF
++	movl TI_flags(%ebp), %ecx
++	testw $_TIF_ALLWORK_MASK, %cx	# current->work
++	jne syscall_exit_work
 +
-+	cpu_gdt_descr->size = GDT_SIZE - 1;
-+ 	cpu_gdt_descr->address = (unsigned long)gdt;
++restore_all:
++#ifndef CONFIG_XEN
++	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
++	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
++	# are returning to the kernel.
++	# See comments in process.c:copy_thread() for details.
++	movb PT_OLDSS(%esp), %ah
++	movb PT_CS(%esp), %al
++	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
++	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
++	CFI_REMEMBER_STATE
++	je ldt_ss			# returning to user-space with LDT SS
++restore_nocheck:
 +#else
-+	if (cpu == 0 && cpu_gdt_descr->address == 0) {
-+		gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
-+		/* alloc_bootmem_pages panics on failure, so no check */
-+		memset(gdt, 0, PAGE_SIZE);
-+
-+		memcpy(gdt, cpu_gdt_table, GDT_SIZE);
-+		
-+		cpu_gdt_descr->size = GDT_SIZE;
-+		cpu_gdt_descr->address = (unsigned long)gdt;
-+	}
++restore_nocheck:
++	movl PT_EFLAGS(%esp), %eax
++	testl $(VM_MASK|NMI_MASK), %eax
++	jnz hypervisor_iret
++	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
++	GET_VCPU_INFO
++	andb evtchn_upcall_mask(%esi),%al
++	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
++	jnz restore_all_enable_events	#        != 0 => enable event delivery
++	CFI_REMEMBER_STATE
++#endif
++	TRACE_IRQS_IRET
++restore_nocheck_notrace:
++	RESTORE_REGS
++	addl $4, %esp			# skip orig_eax/error_code
++	CFI_ADJUST_CFA_OFFSET -4
++1:	INTERRUPT_RETURN
++.section .fixup,"ax"
++iret_exc:
++#ifndef CONFIG_XEN
++	TRACE_IRQS_ON
++	ENABLE_INTERRUPTS(CLBR_NONE)
 +#endif
++	pushl $0			# no error code
++	pushl $do_iret_error
++	jmp error_code
++.previous
++.section __ex_table,"a"
++	.align 4
++	.long 1b,iret_exc
++.previous
 +
-+	cpu_gdt_init(cpu_gdt_descr);
++	CFI_RESTORE_STATE
++#ifndef CONFIG_XEN
++ldt_ss:
++	larl PT_OLDSS(%esp), %eax
++	jnz restore_nocheck
++	testl $0x00400000, %eax		# returning to 32bit stack?
++	jnz restore_nocheck		# all right, normal return
 +
++#ifdef CONFIG_PARAVIRT
 +	/*
-+	 * Set up and load the per-CPU TSS and LDT
++	 * The kernel can't run on a non-flat stack if paravirt mode
++	 * is active.  Rather than try to fixup the high bits of
++	 * ESP, bypass this code entirely.  This may break DOSemu
++	 * and/or Wine support in a paravirt VM, although the option
++	 * is still available to implement the setting of the high
++	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
 +	 */
-+	atomic_inc(&init_mm.mm_count);
-+	current->active_mm = &init_mm;
-+	if (current->mm)
-+		BUG();
-+	enter_lazy_tlb(&init_mm, current);
-+
-+	load_esp0(t, thread);
-+
-+	load_LDT(&init_mm.context);
-+
-+#ifdef CONFIG_DOUBLEFAULT
-+	/* Set up doublefault TSS pointer in the GDT */
-+	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
++	cmpl $0, paravirt_ops+PARAVIRT_enabled
++	jne restore_nocheck
 +#endif
 +
-+	/* Clear %fs and %gs. */
-+	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
-+
-+	/* Clear all 6 debug registers: */
-+	set_debugreg(0, 0);
-+	set_debugreg(0, 1);
-+	set_debugreg(0, 2);
-+	set_debugreg(0, 3);
-+	set_debugreg(0, 6);
-+	set_debugreg(0, 7);
++	/* If returning to userspace with 16bit stack,
++	 * try to fix the higher word of ESP, as the CPU
++	 * won't restore it.
++	 * This is an "official" bug of all the x86-compatible
++	 * CPUs, which we can try to work around to make
++	 * dosemu and wine happy. */
++	movl PT_OLDESP(%esp), %eax
++	movl %esp, %edx
++	call patch_espfix_desc
++	pushl $__ESPFIX_SS
++	CFI_ADJUST_CFA_OFFSET 4
++	pushl %eax
++	CFI_ADJUST_CFA_OFFSET 4
++	DISABLE_INTERRUPTS(CLBR_EAX)
++	TRACE_IRQS_OFF
++	lss (%esp), %esp
++	CFI_ADJUST_CFA_OFFSET -8
++	jmp restore_nocheck
++#else
++hypervisor_iret:
++	andl $~NMI_MASK, PT_EFLAGS(%esp)
++	RESTORE_REGS
++	addl $4, %esp
++	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
++#endif
++	CFI_ENDPROC
 +
-+	/*
-+	 * Force FPU initialization:
-+	 */
-+	current_thread_info()->status = 0;
-+	clear_used_math();
-+	mxcsr_feature_mask_init();
-+}
++	# perform work that needs to be done immediately before resumption
++	ALIGN
++	RING0_PTREGS_FRAME		# can't unwind into user space anyway
++work_pending:
++	testb $_TIF_NEED_RESCHED, %cl
++	jz work_notifysig
++work_resched:
++	call schedule
++	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
++					# setting need_resched or sigpending
++					# between sampling and the iret
++	TRACE_IRQS_OFF
++	movl TI_flags(%ebp), %ecx
++	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
++					# than syscall tracing?
++	jz restore_all
++	testb $_TIF_NEED_RESCHED, %cl
++	jnz work_resched
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+void __cpuinit cpu_uninit(void)
-+{
-+	int cpu = raw_smp_processor_id();
-+	cpu_clear(cpu, cpu_initialized);
++work_notifysig:				# deal with pending signals and
++					# notify-resume requests
++#ifdef CONFIG_VM86
++	testl $VM_MASK, PT_EFLAGS(%esp)
++	movl %esp, %eax
++	jne work_notifysig_v86		# returning to kernel-space or
++					# vm86-space
++	xorl %edx, %edx
++	call do_notify_resume
++	jmp resume_userspace_sig
 +
-+	/* lazy TLB state */
-+	per_cpu(cpu_tlbstate, cpu).state = 0;
-+	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
-+}
++	ALIGN
++work_notifysig_v86:
++	pushl %ecx			# save ti_flags for do_notify_resume
++	CFI_ADJUST_CFA_OFFSET 4
++	call save_v86_state		# %eax contains pt_regs pointer
++	popl %ecx
++	CFI_ADJUST_CFA_OFFSET -4
++	movl %eax, %esp
++#else
++	movl %esp, %eax
 +#endif
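
The entry-code rewrite above drops the old hand-maintained EBX/ECX/.../OLDSS equates in favour of PT_* constants, which 2.6.20 generates from struct pt_regs in asm-offsets.c. As a reading aid only, here is a minimal standalone C sketch of the layout those constants index; the struct name and the printed check are invented, the offsets in the comments assume a 32-bit build, and the xgs slot is the one 2.6.20 added for the %gs-based per-CPU PDA (note SAVE_ALL above loading __KERNEL_PDA into %gs):

#include <stdio.h>
#include <stddef.h>

/* Mirrors 2.6.20's i386 struct pt_regs; SAVE_ALL pushes gs, es, ds,
 * eax, ebp, edi, esi, edx, ecx, ebx, so ebx ends up at offset 0. */
struct pt_regs_sketch {
	long ebx;	/* PT_EBX      =  0 */
	long ecx;	/* PT_ECX      =  4 */
	long edx;	/* PT_EDX      =  8 */
	long esi;	/* PT_ESI      = 12 */
	long edi;	/* PT_EDI      = 16 */
	long ebp;	/* PT_EBP      = 20 */
	long eax;	/* PT_EAX      = 24, syscall return value lands here */
	int  xds;	/* PT_DS       = 28 */
	int  xes;	/* PT_ES       = 32 */
	int  xgs;	/* PT_GS       = 36, new in 2.6.20 (__KERNEL_PDA)    */
	long orig_eax;	/* PT_ORIG_EAX = 40, syscall nr / error code        */
	long eip;	/* PT_EIP      = 44, start of the CPU-pushed frame   */
	int  xcs;	/* PT_CS       = 48 */
	long eflags;	/* PT_EFLAGS   = 52 */
	long esp;	/* PT_OLDESP   = 56, valid only on a ring transition */
	int  xss;	/* PT_OLDSS    = 60 */
};

int main(void)
{
	/* RING0_PTREGS_FRAME's CFA, PT_OLDESP-PT_EBX, is this distance: */
	printf("PT_OLDESP - PT_EBX = %zu\n",
	       offsetof(struct pt_regs_sketch, esp) -
	       offsetof(struct pt_regs_sketch, ebx));
	return 0;
}
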
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/cpu/Makefile linux-2.6.18-xen/arch/i386/kernel/cpu/Makefile
---- linux-2.6.18.3/arch/i386/kernel/cpu/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/kernel/cpu/Makefile	2006-11-19 14:26:21.000000000 +0100
-@@ -17,3 +17,8 @@
- 
- obj-$(CONFIG_MTRR)	+= 	mtrr/
- obj-$(CONFIG_CPU_FREQ)	+=	cpufreq/
++	xorl %edx, %edx
++	call do_notify_resume
++	jmp resume_userspace_sig
 +
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+obj-y := $(call cherrypickxen, $(obj-y), $(src))
-+endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/cpu/mtrr/main-xen.c linux-2.6.18-xen/arch/i386/kernel/cpu/mtrr/main-xen.c
---- linux-2.6.18.3/arch/i386/kernel/cpu/mtrr/main-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/cpu/mtrr/main-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,197 @@
-+#include <linux/init.h>
-+#include <linux/proc_fs.h>
-+#include <linux/ctype.h>
-+#include <linux/module.h>
-+#include <linux/seq_file.h>
-+#include <linux/mutex.h>
-+#include <asm/uaccess.h>
-+
-+#include <asm/mtrr.h>
-+#include "mtrr.h"
-+
-+static DEFINE_MUTEX(mtrr_mutex);
-+
-+void generic_get_mtrr(unsigned int reg, unsigned long *base,
-+		      unsigned int *size, mtrr_type * type)
-+{
-+	dom0_op_t op;
-+
-+	op.cmd = DOM0_READ_MEMTYPE;
-+	op.u.read_memtype.reg = reg;
-+	(void)HYPERVISOR_dom0_op(&op);
-+
-+	*size = op.u.read_memtype.nr_mfns;
-+	*base = op.u.read_memtype.mfn;
-+	*type = op.u.read_memtype.type;
-+}
-+
-+struct mtrr_ops generic_mtrr_ops = {
-+	.use_intel_if      = 1,
-+	.get               = generic_get_mtrr,
-+};
-+
-+struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
-+unsigned int num_var_ranges;
-+unsigned int *usage_table;
-+
-+static void __init set_num_var_ranges(void)
-+{
-+	dom0_op_t op;
-+
-+	for (num_var_ranges = 0; ; num_var_ranges++) {
-+		op.cmd = DOM0_READ_MEMTYPE;
-+		op.u.read_memtype.reg = num_var_ranges;
-+		if (HYPERVISOR_dom0_op(&op) != 0)
-+			break;
-+	}
-+}
-+
-+static void __init init_table(void)
-+{
-+	int i, max;
-+
-+	max = num_var_ranges;
-+	if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
-+	    == NULL) {
-+		printk(KERN_ERR "mtrr: could not allocate\n");
-+		return;
-+	}
-+	for (i = 0; i < max; i++)
-+		usage_table[i] = 0;
-+}
-+
-+int mtrr_add_page(unsigned long base, unsigned long size, 
-+		  unsigned int type, char increment)
-+{
-+	int error;
-+	dom0_op_t op;
-+
-+	mutex_lock(&mtrr_mutex);
-+
-+	op.cmd = DOM0_ADD_MEMTYPE;
-+	op.u.add_memtype.mfn     = base;
-+	op.u.add_memtype.nr_mfns = size;
-+	op.u.add_memtype.type    = type;
-+	error = HYPERVISOR_dom0_op(&op);
-+	if (error) {
-+		mutex_unlock(&mtrr_mutex);
-+		BUG_ON(error > 0);
-+		return error;
-+	}
-+
-+	if (increment)
-+		++usage_table[op.u.add_memtype.reg];
-+
-+	mutex_unlock(&mtrr_mutex);
-+
-+	return op.u.add_memtype.reg;
-+}
-+
-+static int mtrr_check(unsigned long base, unsigned long size)
-+{
-+	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
-+		printk(KERN_WARNING
-+			"mtrr: size and base must be multiples of 4 kiB\n");
-+		printk(KERN_DEBUG
-+			"mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
-+		dump_stack();
-+		return -1;
-+	}
-+	return 0;
-+}
-+
-+int
-+mtrr_add(unsigned long base, unsigned long size, unsigned int type,
-+	 char increment)
-+{
-+	if (mtrr_check(base, size))
-+		return -EINVAL;
-+	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
-+			     increment);
-+}
-+
-+int mtrr_del_page(int reg, unsigned long base, unsigned long size)
-+{
-+	unsigned i;
-+	mtrr_type ltype;
-+	unsigned long lbase;
-+	unsigned int lsize;
-+	int error = -EINVAL;
-+	dom0_op_t op;
-+
-+	mutex_lock(&mtrr_mutex);
-+
-+	if (reg < 0) {
-+		/*  Search for existing MTRR  */
-+		for (i = 0; i < num_var_ranges; ++i) {
-+			mtrr_if->get(i, &lbase, &lsize, &ltype);
-+			if (lbase == base && lsize == size) {
-+				reg = i;
-+				break;
-+			}
-+		}
-+		if (reg < 0) {
-+			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
-+			       size);
-+			goto out;
-+		}
-+	}
-+	if (usage_table[reg] < 1) {
-+		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
-+		goto out;
-+	}
-+	if (--usage_table[reg] < 1) {
-+		op.cmd = DOM0_DEL_MEMTYPE;
-+		op.u.del_memtype.handle = 0;
-+		op.u.del_memtype.reg    = reg;
-+		error = HYPERVISOR_dom0_op(&op);
-+		if (error) {
-+			BUG_ON(error > 0);
-+			goto out;
-+		}
-+	}
-+	error = reg;
-+ out:
-+	mutex_unlock(&mtrr_mutex);
-+	return error;
-+}
-+
-+int
-+mtrr_del(int reg, unsigned long base, unsigned long size)
-+{
-+	if (mtrr_check(base, size))
-+		return -EINVAL;
-+	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
-+}
-+
-+EXPORT_SYMBOL(mtrr_add);
-+EXPORT_SYMBOL(mtrr_del);
-+
-+void __init mtrr_bp_init(void)
-+{
-+}
-+
-+void mtrr_ap_init(void)
-+{
-+}
-+
-+static int __init mtrr_init(void)
-+{
-+	struct cpuinfo_x86 *c = &boot_cpu_data;
-+
-+	if (!is_initial_xendomain())
-+		return -ENODEV;
-+
-+	if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
-+	    (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
-+	    (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
-+	    (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
-+		return -ENODEV;
++	# perform syscall entry tracing
++	ALIGN
++syscall_trace_entry:
++	movl $-ENOSYS,PT_EAX(%esp)
++	movl %esp, %eax
++	xorl %edx,%edx
++	call do_syscall_trace
++	cmpl $0, %eax
++	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
++					# so must skip actual syscall
++	movl PT_ORIG_EAX(%esp), %eax
++	cmpl $(nr_syscalls), %eax
++	jnae syscall_call
++	jmp syscall_exit
 +
-+	set_num_var_ranges();
-+	init_table();
++	# perform syscall exit tracing
++	ALIGN
++syscall_exit_work:
++	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
++	jz work_pending
++	TRACE_IRQS_ON
++	ENABLE_INTERRUPTS(CLBR_ANY)	# could let do_syscall_trace() call
++					# schedule() instead
++	movl %esp, %eax
++	movl $1, %edx
++	call do_syscall_trace
++	jmp resume_userspace
++	CFI_ENDPROC
 +
-+	return 0;
-+}
++	RING0_INT_FRAME			# can't unwind into user space anyway
++syscall_fault:
++	pushl %eax			# save orig_eax
++	CFI_ADJUST_CFA_OFFSET 4
++	SAVE_ALL
++	GET_THREAD_INFO(%ebp)
++	movl $-EFAULT,PT_EAX(%esp)
++	jmp resume_userspace
 +
-+subsys_initcall(mtrr_init);
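
The main-xen.c removed here (presumably superseded by a 2.6.20 copy elsewhere in the updated patch) keeps the stock mtrr_add()/mtrr_del() kernel interface but backs it with DOM0_ADD_MEMTYPE/DOM0_DEL_MEMTYPE hypercalls, so only the initial domain can actually program ranges. A minimal sketch of how a driver of this era consumes that interface; the framebuffer address, size and module name are made up for illustration:

#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/mtrr.h>

static int demo_mtrr_reg = -1;

static int __init demo_init(void)
{
	/* Mark a hypothetical 16 MiB framebuffer write-combining.  base
	 * and size are in bytes; mtrr_add() converts them to pages and,
	 * in the Xen build above, forwards the request to the hypervisor. */
	demo_mtrr_reg = mtrr_add(0xd0000000UL, 0x1000000UL,
				 MTRR_TYPE_WRCOMB, 1);
	if (demo_mtrr_reg < 0)
		printk(KERN_INFO "demo: no MTRR set up (%d)\n", demo_mtrr_reg);
	return 0;
}

static void __exit demo_exit(void)
{
	if (demo_mtrr_reg >= 0)
		mtrr_del(demo_mtrr_reg, 0xd0000000UL, 0x1000000UL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Failure is non-fatal by design: a driver simply runs without write-combining, for example in a non-initial domain where mtrr_init() above bails out with -ENODEV.
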
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/cpu/mtrr/Makefile linux-2.6.18-xen/arch/i386/kernel/cpu/mtrr/Makefile
---- linux-2.6.18.3/arch/i386/kernel/cpu/mtrr/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/kernel/cpu/mtrr/Makefile	2006-11-19 14:26:22.000000000 +0100
-@@ -3,3 +3,10 @@
- obj-y		+= cyrix.o
- obj-y		+= centaur.o
- 
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+n-obj-xen := generic.o state.o amd.o cyrix.o centaur.o
++syscall_badsys:
++	movl $-ENOSYS,PT_EAX(%esp)
++	jmp resume_userspace
++	CFI_ENDPROC
 +
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/early_printk-xen.c linux-2.6.18-xen/arch/i386/kernel/early_printk-xen.c
---- linux-2.6.18.3/arch/i386/kernel/early_printk-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/early_printk-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,2 @@
++#ifndef CONFIG_XEN
++#define FIXUP_ESPFIX_STACK \
++	/* since we are on the wrong stack, we can't do this in C :( */ \
++	movl %gs:PDA_cpu, %ebx; \
++	PER_CPU(cpu_gdt_descr, %ebx); \
++	movl GDS_address(%ebx), %ebx; \
++	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
++	addl %esp, %eax; \
++	pushl $__KERNEL_DS; \
++	CFI_ADJUST_CFA_OFFSET 4; \
++	pushl %eax; \
++	CFI_ADJUST_CFA_OFFSET 4; \
++	lss (%esp), %esp; \
++	CFI_ADJUST_CFA_OFFSET -8;
++#define UNWIND_ESPFIX_STACK \
++	movl %ss, %eax; \
++	/* see if on espfix stack */ \
++	cmpw $__ESPFIX_SS, %ax; \
++	jne 27f; \
++	movl $__KERNEL_DS, %eax; \
++	movl %eax, %ds; \
++	movl %eax, %es; \
++	/* switch to normal stack */ \
++	FIXUP_ESPFIX_STACK; \
++27:;
 +
-+#include "../../x86_64/kernel/early_printk-xen.c"
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/entry.S linux-2.6.18-xen/arch/i386/kernel/entry.S
---- linux-2.6.18.3/arch/i386/kernel/entry.S	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/kernel/entry.S	2006-11-19 14:26:22.000000000 +0100
-@@ -269,7 +269,7 @@
- 	CFI_STARTPROC simple
- 	CFI_DEF_CFA esp, 0
- 	CFI_REGISTER esp, ebp
--	movl TSS_sysenter_esp0(%esp),%esp
-+	movl SYSENTER_stack_esp0(%esp),%esp
- sysenter_past_esp:
- 	/*
- 	 * No need to follow this irqs on/off section: the syscall
-@@ -689,7 +689,7 @@
-  * that sets up the real kernel stack. Check here, since we can't
-  * allow the wrong stack to be used.
-  *
-- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
-+ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
-  * already pushed 3 words if it hits on the sysenter instruction:
-  * eflags, cs and eip.
-  *
-@@ -701,7 +701,7 @@
- 	cmpw $__KERNEL_CS,4(%esp);		\
- 	jne ok;					\
- label:						\
--	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
-+	movl SYSENTER_stack_esp0+offset(%esp),%esp;	\
- 	pushfl;					\
- 	pushl $__KERNEL_CS;			\
- 	pushl $sysenter_past_esp
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/entry-xen.S linux-2.6.18-xen/arch/i386/kernel/entry-xen.S
---- linux-2.6.18.3/arch/i386/kernel/entry-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/entry-xen.S	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,1213 @@
 +/*
-+ *  linux/arch/i386/entry.S
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
++ * Build the entry stubs and pointer table with
++ * some assembler magic.
 + */
++.data
++ENTRY(interrupt)
++.text
++
++vector=0
++ENTRY(irq_entries_start)
++	RING0_INT_FRAME
++.rept NR_IRQS
++	ALIGN
++ .if vector
++	CFI_ADJUST_CFA_OFFSET -4
++ .endif
++1:	pushl $~(vector)
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp common_interrupt
++.data
++	.long 1b
++.text
++vector=vector+1
++.endr
 +
 +/*
-+ * entry.S contains the system-call and fault low-level handling routines.
-+ * This also contains the timer-interrupt handler, as well as all interrupts
-+ * and faults that can result in a task-switch.
-+ *
-+ * NOTE: This code handles signal-recognition, which happens every time
-+ * after a timer-interrupt and after each system call.
-+ *
-+ * I changed all the .align's to 4 (16 byte alignment), as that's faster
-+ * on a 486.
-+ *
-+ * Stack layout in 'ret_from_system_call':
-+ * 	ptrace needs to have all regs on the stack.
-+ *	if the order here is changed, it needs to be
-+ *	updated in fork.c:copy_process, signal.c:do_signal,
-+ *	ptrace.c and ptrace.h
-+ *
-+ *	 0(%esp) - %ebx
-+ *	 4(%esp) - %ecx
-+ *	 8(%esp) - %edx
-+ *       C(%esp) - %esi
-+ *	10(%esp) - %edi
-+ *	14(%esp) - %ebp
-+ *	18(%esp) - %eax
-+ *	1C(%esp) - %ds
-+ *	20(%esp) - %es
-+ *	24(%esp) - orig_eax
-+ *	28(%esp) - %eip
-+ *	2C(%esp) - %cs
-+ *	30(%esp) - %eflags
-+ *	34(%esp) - %oldesp
-+ *	38(%esp) - %oldss
-+ *
-+ * "current" is in register %ebx during any slow entries.
++ * the CPU automatically disables interrupts when executing an IRQ vector,
++ * so IRQ-flags tracing has to follow that:
 + */
++	ALIGN
++common_interrupt:
++	SAVE_ALL
++	TRACE_IRQS_OFF
++	movl %esp,%eax
++	call do_IRQ
++	jmp ret_from_intr
++	CFI_ENDPROC
 +
-+#include <linux/linkage.h>
-+#include <asm/thread_info.h>
-+#include <asm/irqflags.h>
-+#include <asm/errno.h>
-+#include <asm/segment.h>
-+#include <asm/smp.h>
-+#include <asm/page.h>
-+#include <asm/desc.h>
-+#include <asm/dwarf2.h>
-+#include "irq_vectors.h"
-+#include <xen/interface/xen.h>
-+
-+#define nr_syscalls ((syscall_table_size)/4)
-+
-+EBX		= 0x00
-+ECX		= 0x04
-+EDX		= 0x08
-+ESI		= 0x0C
-+EDI		= 0x10
-+EBP		= 0x14
-+EAX		= 0x18
-+DS		= 0x1C
-+ES		= 0x20
-+ORIG_EAX	= 0x24
-+EIP		= 0x28
-+CS		= 0x2C
-+EFLAGS		= 0x30
-+OLDESP		= 0x34
-+OLDSS		= 0x38
-+
-+CF_MASK		= 0x00000001
-+TF_MASK		= 0x00000100
-+IF_MASK		= 0x00000200
-+DF_MASK		= 0x00000400 
-+NT_MASK		= 0x00004000
-+VM_MASK		= 0x00020000
-+/* Pseudo-eflags. */
-+NMI_MASK	= 0x80000000
-+
-+#ifndef CONFIG_XEN
-+#define DISABLE_INTERRUPTS	cli
-+#define ENABLE_INTERRUPTS	sti
-+#else
-+/* Offsets into shared_info_t. */
-+#define evtchn_upcall_pending		/* 0 */
-+#define evtchn_upcall_mask		1
-+
-+#define sizeof_vcpu_shift		6
-+
-+#ifdef CONFIG_SMP
-+#define GET_VCPU_INFO		movl TI_cpu(%ebp),%esi			; \
-+				shl  $sizeof_vcpu_shift,%esi		; \
-+				addl HYPERVISOR_shared_info,%esi
-+#else
-+#define GET_VCPU_INFO		movl HYPERVISOR_shared_info,%esi
-+#endif
-+
-+#define __DISABLE_INTERRUPTS	movb $1,evtchn_upcall_mask(%esi)
-+#define __ENABLE_INTERRUPTS	movb $0,evtchn_upcall_mask(%esi)
-+#define DISABLE_INTERRUPTS	GET_VCPU_INFO				; \
-+				__DISABLE_INTERRUPTS
-+#define ENABLE_INTERRUPTS	GET_VCPU_INFO				; \
-+				__ENABLE_INTERRUPTS
-+#define __TEST_PENDING		testb $0xFF,evtchn_upcall_pending(%esi)
-+#endif
-+
-+#ifdef CONFIG_PREEMPT
-+#define preempt_stop		cli; TRACE_IRQS_OFF
-+#else
-+#define preempt_stop
-+#define resume_kernel		restore_nocheck
-+#endif
-+
-+.macro TRACE_IRQS_IRET
-+#ifdef CONFIG_TRACE_IRQFLAGS
-+	testl $IF_MASK,EFLAGS(%esp)     # interrupts off?
-+	jz 1f
-+	TRACE_IRQS_ON
-+1:
-+#endif
-+.endm
++#define BUILD_INTERRUPT(name, nr)	\
++ENTRY(name)				\
++	RING0_INT_FRAME;		\
++	pushl $~(nr);			\
++	CFI_ADJUST_CFA_OFFSET 4;	\
++	SAVE_ALL;			\
++	TRACE_IRQS_OFF			\
++	movl %esp,%eax;			\
++	call smp_/**/name;		\
++	jmp ret_from_intr;		\
++	CFI_ENDPROC
 +
-+#ifdef CONFIG_VM86
-+#define resume_userspace_sig	check_userspace
++/* The include is where all of the SMP etc. interrupts come from */
++#include "entry_arch.h"
 +#else
-+#define resume_userspace_sig	resume_userspace
++#define UNWIND_ESPFIX_STACK
 +#endif
 +
-+#define SAVE_ALL \
-+	cld; \
-+	pushl %es; \
-+	CFI_ADJUST_CFA_OFFSET 4;\
-+	/*CFI_REL_OFFSET es, 0;*/\
-+	pushl %ds; \
-+	CFI_ADJUST_CFA_OFFSET 4;\
-+	/*CFI_REL_OFFSET ds, 0;*/\
-+	pushl %eax; \
-+	CFI_ADJUST_CFA_OFFSET 4;\
-+	CFI_REL_OFFSET eax, 0;\
-+	pushl %ebp; \
-+	CFI_ADJUST_CFA_OFFSET 4;\
-+	CFI_REL_OFFSET ebp, 0;\
-+	pushl %edi; \
-+	CFI_ADJUST_CFA_OFFSET 4;\
-+	CFI_REL_OFFSET edi, 0;\
-+	pushl %esi; \
-+	CFI_ADJUST_CFA_OFFSET 4;\
-+	CFI_REL_OFFSET esi, 0;\
-+	pushl %edx; \
-+	CFI_ADJUST_CFA_OFFSET 4;\
-+	CFI_REL_OFFSET edx, 0;\
-+	pushl %ecx; \
-+	CFI_ADJUST_CFA_OFFSET 4;\
-+	CFI_REL_OFFSET ecx, 0;\
-+	pushl %ebx; \
-+	CFI_ADJUST_CFA_OFFSET 4;\
-+	CFI_REL_OFFSET ebx, 0;\
-+	movl $(__USER_DS), %edx; \
-+	movl %edx, %ds; \
-+	movl %edx, %es;
-+
-+#define RESTORE_INT_REGS \
-+	popl %ebx;	\
-+	CFI_ADJUST_CFA_OFFSET -4;\
-+	CFI_RESTORE ebx;\
-+	popl %ecx;	\
-+	CFI_ADJUST_CFA_OFFSET -4;\
-+	CFI_RESTORE ecx;\
-+	popl %edx;	\
-+	CFI_ADJUST_CFA_OFFSET -4;\
-+	CFI_RESTORE edx;\
-+	popl %esi;	\
-+	CFI_ADJUST_CFA_OFFSET -4;\
-+	CFI_RESTORE esi;\
-+	popl %edi;	\
-+	CFI_ADJUST_CFA_OFFSET -4;\
-+	CFI_RESTORE edi;\
-+	popl %ebp;	\
-+	CFI_ADJUST_CFA_OFFSET -4;\
-+	CFI_RESTORE ebp;\
-+	popl %eax;	\
-+	CFI_ADJUST_CFA_OFFSET -4;\
-+	CFI_RESTORE eax
-+
-+#define RESTORE_REGS	\
-+	RESTORE_INT_REGS; \
-+1:	popl %ds;	\
-+	CFI_ADJUST_CFA_OFFSET -4;\
-+	/*CFI_RESTORE ds;*/\
-+2:	popl %es;	\
-+	CFI_ADJUST_CFA_OFFSET -4;\
-+	/*CFI_RESTORE es;*/\
-+.section .fixup,"ax";	\
-+3:	movl $0,(%esp);	\
-+	jmp 1b;		\
-+4:	movl $0,(%esp);	\
-+	jmp 2b;		\
-+.previous;		\
-+.section __ex_table,"a";\
-+	.align 4;	\
-+	.long 1b,3b;	\
-+	.long 2b,4b;	\
-+.previous
-+
-+#define RING0_INT_FRAME \
-+	CFI_STARTPROC simple;\
-+	CFI_DEF_CFA esp, 3*4;\
-+	/*CFI_OFFSET cs, -2*4;*/\
-+	CFI_OFFSET eip, -3*4
-+
-+#define RING0_EC_FRAME \
-+	CFI_STARTPROC simple;\
-+	CFI_DEF_CFA esp, 4*4;\
-+	/*CFI_OFFSET cs, -2*4;*/\
-+	CFI_OFFSET eip, -3*4
-+
-+#define RING0_PTREGS_FRAME \
-+	CFI_STARTPROC simple;\
-+	CFI_DEF_CFA esp, OLDESP-EBX;\
-+	/*CFI_OFFSET cs, CS-OLDESP;*/\
-+	CFI_OFFSET eip, EIP-OLDESP;\
-+	/*CFI_OFFSET es, ES-OLDESP;*/\
-+	/*CFI_OFFSET ds, DS-OLDESP;*/\
-+	CFI_OFFSET eax, EAX-OLDESP;\
-+	CFI_OFFSET ebp, EBP-OLDESP;\
-+	CFI_OFFSET edi, EDI-OLDESP;\
-+	CFI_OFFSET esi, ESI-OLDESP;\
-+	CFI_OFFSET edx, EDX-OLDESP;\
-+	CFI_OFFSET ecx, ECX-OLDESP;\
-+	CFI_OFFSET ebx, EBX-OLDESP
-+
-+ENTRY(ret_from_fork)
-+	CFI_STARTPROC
++KPROBE_ENTRY(page_fault)
++	RING0_EC_FRAME
++	pushl $do_page_fault
++	CFI_ADJUST_CFA_OFFSET 4
++	ALIGN
++error_code:
++	/* the function address is in %gs's slot on the stack */
++	pushl %es
++	CFI_ADJUST_CFA_OFFSET 4
++	/*CFI_REL_OFFSET es, 0*/
++	pushl %ds
++	CFI_ADJUST_CFA_OFFSET 4
++	/*CFI_REL_OFFSET ds, 0*/
 +	pushl %eax
 +	CFI_ADJUST_CFA_OFFSET 4
-+	call schedule_tail
-+	GET_THREAD_INFO(%ebp)
-+	popl %eax
-+	CFI_ADJUST_CFA_OFFSET -4
-+	pushl $0x0202			# Reset kernel eflags
-+	CFI_ADJUST_CFA_OFFSET 4
-+	popfl
-+	CFI_ADJUST_CFA_OFFSET -4
-+	jmp syscall_exit
-+	CFI_ENDPROC
-+
-+/*
-+ * Return to user mode is not as complex as all this looks,
-+ * but we want the default path for a system call return to
-+ * go as quickly as possible which is why some of this is
-+ * less clear than it otherwise should be.
-+ */
-+
-+	# userspace resumption stub bypassing syscall exit tracing
-+	ALIGN
-+	RING0_PTREGS_FRAME
-+ret_from_exception:
-+	preempt_stop
-+ret_from_intr:
-+	GET_THREAD_INFO(%ebp)
-+check_userspace:
-+	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
-+	movb CS(%esp), %al
-+	testl $(VM_MASK | 2), %eax
-+	jz resume_kernel
-+ENTRY(resume_userspace)
-+	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
-+					# setting need_resched or sigpending
-+					# between sampling and the iret
-+	movl TI_flags(%ebp), %ecx
-+	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
-+					# int/exception return?
-+	jne work_pending
-+	jmp restore_all
-+
-+#ifdef CONFIG_PREEMPT
-+ENTRY(resume_kernel)
-+	cli
-+	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
-+	jnz restore_nocheck
-+need_resched:
-+	movl TI_flags(%ebp), %ecx	# need_resched set ?
-+	testb $_TIF_NEED_RESCHED, %cl
-+	jz restore_all
-+	testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
-+	jz restore_all
-+	call preempt_schedule_irq
-+	jmp need_resched
-+#endif
-+	CFI_ENDPROC
-+
-+/* SYSENTER_RETURN points to after the "sysenter" instruction in
-+   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
-+
-+	# sysenter call handler stub
-+ENTRY(sysenter_entry)
-+	CFI_STARTPROC simple
-+	CFI_DEF_CFA esp, 0
-+	CFI_REGISTER esp, ebp
-+	movl SYSENTER_stack_esp0(%esp),%esp
-+sysenter_past_esp:
-+	/*
-+	 * No need to follow this irqs on/off section: the syscall
-+	 * disabled irqs and here we enable it straight after entry:
-+	 */
-+	sti
-+	pushl $(__USER_DS)
-+	CFI_ADJUST_CFA_OFFSET 4
-+	/*CFI_REL_OFFSET ss, 0*/
-+	pushl %ebp
-+	CFI_ADJUST_CFA_OFFSET 4
-+	CFI_REL_OFFSET esp, 0
-+	pushfl
-+	CFI_ADJUST_CFA_OFFSET 4
-+	pushl $(__USER_CS)
-+	CFI_ADJUST_CFA_OFFSET 4
-+	/*CFI_REL_OFFSET cs, 0*/
-+	/*
-+	 * Push current_thread_info()->sysenter_return to the stack.
-+	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
-+	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
-+	 */
-+	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
-+	CFI_ADJUST_CFA_OFFSET 4
-+	CFI_REL_OFFSET eip, 0
-+
-+/*
-+ * Load the potential sixth argument from user stack.
-+ * Careful about security.
-+ */
-+	cmpl $__PAGE_OFFSET-3,%ebp
-+	jae syscall_fault
-+1:	movl (%ebp),%ebp
-+.section __ex_table,"a"
-+	.align 4
-+	.long 1b,syscall_fault
-+.previous
-+
-+	pushl %eax
-+	CFI_ADJUST_CFA_OFFSET 4
-+	SAVE_ALL
-+	GET_THREAD_INFO(%ebp)
-+
-+	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-+	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
-+	jnz syscall_trace_entry
-+	cmpl $(nr_syscalls), %eax
-+	jae syscall_badsys
-+	call *sys_call_table(,%eax,4)
-+	movl %eax,EAX(%esp)
-+	DISABLE_INTERRUPTS
-+	TRACE_IRQS_OFF
-+	movl TI_flags(%ebp), %ecx
-+	testw $_TIF_ALLWORK_MASK, %cx
-+	jne syscall_exit_work
-+/* if something modifies registers it must also disable sysexit */
-+	movl EIP(%esp), %edx
-+	movl OLDESP(%esp), %ecx
-+	xorl %ebp,%ebp
-+	TRACE_IRQS_ON
-+#ifdef CONFIG_XEN
-+	__ENABLE_INTERRUPTS
-+sysexit_scrit:	/**** START OF SYSEXIT CRITICAL REGION ****/
-+	__TEST_PENDING
-+	jnz  14f			# process more events if necessary...
-+	movl ESI(%esp), %esi
-+	sysexit
-+14:	__DISABLE_INTERRUPTS
-+sysexit_ecrit:	/**** END OF SYSEXIT CRITICAL REGION ****/
-+	push %esp
-+	CFI_ADJUST_CFA_OFFSET 4
-+	call evtchn_do_upcall
-+	add  $4,%esp
-+	CFI_ADJUST_CFA_OFFSET -4
-+	jmp  ret_from_intr
-+#else
-+	sti
-+	sysexit
-+#endif /* !CONFIG_XEN */
-+	CFI_ENDPROC
-+
-+
-+	# system call handler stub
-+ENTRY(system_call)
-+	RING0_INT_FRAME			# can't unwind into user space anyway
-+	pushl %eax			# save orig_eax
-+	CFI_ADJUST_CFA_OFFSET 4
-+	SAVE_ALL
-+	GET_THREAD_INFO(%ebp)
-+	testl $TF_MASK,EFLAGS(%esp)
-+	jz no_singlestep
-+	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
-+no_singlestep:
-+					# system call tracing in operation / emulation
-+	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-+	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
-+	jnz syscall_trace_entry
-+	cmpl $(nr_syscalls), %eax
-+	jae syscall_badsys
-+syscall_call:
-+	call *sys_call_table(,%eax,4)
-+	movl %eax,EAX(%esp)		# store the return value
-+syscall_exit:
-+	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
-+					# setting need_resched or sigpending
-+					# between sampling and the iret
-+	TRACE_IRQS_OFF
-+	movl TI_flags(%ebp), %ecx
-+	testw $_TIF_ALLWORK_MASK, %cx	# current->work
-+	jne syscall_exit_work
-+
-+restore_all:
-+#ifndef CONFIG_XEN
-+	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
-+	# Warning: OLDSS(%esp) contains the wrong/random values if we
-+	# are returning to the kernel.
-+	# See comments in process.c:copy_thread() for details.
-+	movb OLDSS(%esp), %ah
-+	movb CS(%esp), %al
-+	andl $(VM_MASK | (4 << 8) | 3), %eax
-+	cmpl $((4 << 8) | 3), %eax
-+	je ldt_ss			# returning to user-space with LDT SS
-+restore_nocheck:
-+#else
-+restore_nocheck:
-+	movl EFLAGS(%esp), %eax
-+	testl $(VM_MASK|NMI_MASK), %eax
-+	jnz hypervisor_iret
-+	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
-+	GET_VCPU_INFO
-+	andb evtchn_upcall_mask(%esi),%al
-+	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
-+	jnz restore_all_enable_events	#        != 0 => enable event delivery
-+#endif
-+	TRACE_IRQS_IRET
-+	CFI_REMEMBER_STATE
-+restore_nocheck_notrace:
-+	RESTORE_REGS
-+	addl $4, %esp
-+	CFI_ADJUST_CFA_OFFSET -4
-+1:	iret
-+.section .fixup,"ax"
-+iret_exc:
-+#ifndef CONFIG_XEN
-+	TRACE_IRQS_ON
-+	sti
-+#endif
-+	pushl $0			# no error code
-+	pushl $do_iret_error
-+	jmp error_code
-+.previous
-+.section __ex_table,"a"
-+	.align 4
-+	.long 1b,iret_exc
-+.previous
-+
-+	CFI_RESTORE_STATE
-+#ifndef CONFIG_XEN
-+ldt_ss:
-+	larl OLDSS(%esp), %eax
-+	jnz restore_nocheck
-+	testl $0x00400000, %eax		# returning to 32bit stack?
-+	jnz restore_nocheck		# allright, normal return
-+	/* If returning to userspace with 16bit stack,
-+	 * try to fix the higher word of ESP, as the CPU
-+	 * won't restore it.
-+	 * This is an "official" bug of all the x86-compatible
-+	 * CPUs, which we can try to work around to make
-+	 * dosemu and wine happy. */
-+	subl $8, %esp		# reserve space for switch16 pointer
-+	CFI_ADJUST_CFA_OFFSET 8
-+	cli
-+	TRACE_IRQS_OFF
-+	movl %esp, %eax
-+	/* Set up the 16bit stack frame with switch32 pointer on top,
-+	 * and a switch16 pointer on top of the current frame. */
-+	call setup_x86_bogus_stack
-+	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
-+	TRACE_IRQS_IRET
-+	RESTORE_REGS
-+	lss 20+4(%esp), %esp	# switch to 16bit stack
-+1:	iret
-+.section __ex_table,"a"
-+	.align 4
-+	.long 1b,iret_exc
-+.previous
-+#else
-+hypervisor_iret:
-+	andl $~NMI_MASK, EFLAGS(%esp)
-+	RESTORE_REGS
-+	addl $4, %esp
-+	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
-+#endif
-+	CFI_ENDPROC
-+
-+	# perform work that needs to be done immediately before resumption
-+	ALIGN
-+	RING0_PTREGS_FRAME		# can't unwind into user space anyway
-+work_pending:
-+	testb $_TIF_NEED_RESCHED, %cl
-+	jz work_notifysig
-+work_resched:
-+	call schedule
-+	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
-+					# setting need_resched or sigpending
-+					# between sampling and the iret
-+	TRACE_IRQS_OFF
-+	movl TI_flags(%ebp), %ecx
-+	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
-+					# than syscall tracing?
-+	jz restore_all
-+	testb $_TIF_NEED_RESCHED, %cl
-+	jnz work_resched
-+
-+work_notifysig:				# deal with pending signals and
-+					# notify-resume requests
-+	testl $VM_MASK, EFLAGS(%esp)
-+	movl %esp, %eax
-+	jne work_notifysig_v86		# returning to kernel-space or
-+					# vm86-space
-+	xorl %edx, %edx
-+	call do_notify_resume
-+	jmp resume_userspace_sig
-+
-+	ALIGN
-+work_notifysig_v86:
-+#ifdef CONFIG_VM86
-+	pushl %ecx			# save ti_flags for do_notify_resume
-+	CFI_ADJUST_CFA_OFFSET 4
-+	call save_v86_state		# %eax contains pt_regs pointer
-+	popl %ecx
-+	CFI_ADJUST_CFA_OFFSET -4
-+	movl %eax, %esp
-+	xorl %edx, %edx
-+	call do_notify_resume
-+	jmp resume_userspace_sig
-+#endif
-+
-+	# perform syscall exit tracing
-+	ALIGN
-+syscall_trace_entry:
-+	movl $-ENOSYS,EAX(%esp)
-+	movl %esp, %eax
-+	xorl %edx,%edx
-+	call do_syscall_trace
-+	cmpl $0, %eax
-+	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
-+					# so must skip actual syscall
-+	movl ORIG_EAX(%esp), %eax
-+	cmpl $(nr_syscalls), %eax
-+	jnae syscall_call
-+	jmp syscall_exit
-+
-+	# perform syscall exit tracing
-+	ALIGN
-+syscall_exit_work:
-+	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
-+	jz work_pending
-+	TRACE_IRQS_ON
-+	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
-+					# schedule() instead
-+	movl %esp, %eax
-+	movl $1, %edx
-+	call do_syscall_trace
-+	jmp resume_userspace
-+	CFI_ENDPROC
-+
-+	RING0_INT_FRAME			# can't unwind into user space anyway
-+syscall_fault:
-+	pushl %eax			# save orig_eax
-+	CFI_ADJUST_CFA_OFFSET 4
-+	SAVE_ALL
-+	GET_THREAD_INFO(%ebp)
-+	movl $-EFAULT,EAX(%esp)
-+	jmp resume_userspace
-+
-+syscall_badsys:
-+	movl $-ENOSYS,EAX(%esp)
-+	jmp resume_userspace
-+	CFI_ENDPROC
-+
-+#ifndef CONFIG_XEN
-+#define FIXUP_ESPFIX_STACK \
-+	movl %esp, %eax; \
-+	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
-+	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
-+	/* copy data from 16bit stack to 32bit stack */ \
-+	call fixup_x86_bogus_stack; \
-+	/* put ESP to the proper location */ \
-+	movl %eax, %esp;
-+#define UNWIND_ESPFIX_STACK \
-+	pushl %eax; \
-+	CFI_ADJUST_CFA_OFFSET 4; \
-+	movl %ss, %eax; \
-+	/* see if on 16bit stack */ \
-+	cmpw $__ESPFIX_SS, %ax; \
-+	je 28f; \
-+27:	popl %eax; \
-+	CFI_ADJUST_CFA_OFFSET -4; \
-+.section .fixup,"ax"; \
-+28:	movl $__KERNEL_DS, %eax; \
-+	movl %eax, %ds; \
-+	movl %eax, %es; \
-+	/* switch to 32bit stack */ \
-+	FIXUP_ESPFIX_STACK; \
-+	jmp 27b; \
-+.previous
-+
-+/*
-+ * Build the entry stubs and pointer table with
-+ * some assembler magic.
-+ */
-+.data
-+ENTRY(interrupt)
-+.text
-+
-+vector=0
-+ENTRY(irq_entries_start)
-+	RING0_INT_FRAME
-+.rept NR_IRQS
-+	ALIGN
-+ .if vector
-+	CFI_ADJUST_CFA_OFFSET -4
-+ .endif
-+1:	pushl $~(vector)
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp common_interrupt
-+.data
-+	.long 1b
-+.text
-+vector=vector+1
-+.endr
-+
-+/*
-+ * the CPU automatically disables interrupts when executing an IRQ vector,
-+ * so IRQ-flags tracing has to follow that:
-+ */
-+	ALIGN
-+common_interrupt:
-+	SAVE_ALL
-+	TRACE_IRQS_OFF
-+	movl %esp,%eax
-+	call do_IRQ
-+	jmp ret_from_intr
-+	CFI_ENDPROC
-+
-+#define BUILD_INTERRUPT(name, nr)	\
-+ENTRY(name)				\
-+	SAVE_ALL			\
-+	RING0_INT_FRAME;		\
-+	pushl $~(nr);			\
-+	CFI_ADJUST_CFA_OFFSET 4;	\
-+	SAVE_ALL;			\
-+	TRACE_IRQS_OFF			\
-+	movl %esp,%eax;			\
-+	call smp_/**/name;		\
-+	jmp ret_from_intr;		\
-+	CFI_ENDPROC
-+
-+/* The include is where all of the SMP etc. interrupts come from */
-+#include "entry_arch.h"
-+#else
-+#define UNWIND_ESPFIX_STACK
-+#endif
-+
-+ENTRY(divide_error)
-+	RING0_INT_FRAME
-+	pushl $0			# no error code
-+	CFI_ADJUST_CFA_OFFSET 4
-+	pushl $do_divide_error
-+	CFI_ADJUST_CFA_OFFSET 4
-+	ALIGN
-+error_code:
-+	pushl %ds
-+	CFI_ADJUST_CFA_OFFSET 4
-+	/*CFI_REL_OFFSET ds, 0*/
-+	pushl %eax
-+	CFI_ADJUST_CFA_OFFSET 4
-+	CFI_REL_OFFSET eax, 0
-+	xorl %eax, %eax
-+	pushl %ebp
++	CFI_REL_OFFSET eax, 0
++	pushl %ebp
 +	CFI_ADJUST_CFA_OFFSET 4
 +	CFI_REL_OFFSET ebp, 0
 +	pushl %edi
@@ -3433,7 +2777,6 @@
 +	pushl %edx
 +	CFI_ADJUST_CFA_OFFSET 4
 +	CFI_REL_OFFSET edx, 0
-+	decl %eax			# eax = -1
 +	pushl %ecx
 +	CFI_ADJUST_CFA_OFFSET 4
 +	CFI_REL_OFFSET ecx, 0
@@ -3441,18 +2784,20 @@
 +	CFI_ADJUST_CFA_OFFSET 4
 +	CFI_REL_OFFSET ebx, 0
 +	cld
-+	pushl %es
++	pushl %gs
 +	CFI_ADJUST_CFA_OFFSET 4
-+	/*CFI_REL_OFFSET es, 0*/
++	/*CFI_REL_OFFSET gs, 0*/
++	movl $(__KERNEL_PDA), %ecx
++	movl %ecx, %gs
 +	UNWIND_ESPFIX_STACK
 +	popl %ecx
 +	CFI_ADJUST_CFA_OFFSET -4
 +	/*CFI_REGISTER es, ecx*/
-+	movl ES(%esp), %edi		# get the function address
-+	movl ORIG_EAX(%esp), %edx	# get the error code
-+	movl %eax, ORIG_EAX(%esp)
-+	movl %ecx, ES(%esp)
-+	/*CFI_REL_OFFSET es, ES*/
++	movl PT_GS(%esp), %edi		# get the function address
++	movl PT_ORIG_EAX(%esp), %edx	# get the error code
++	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
++	mov  %ecx, PT_GS(%esp)
++	/*CFI_REL_OFFSET gs, ES*/
 +	movl $(__USER_DS), %ecx
 +	movl %ecx, %ds
 +	movl %ecx, %es
@@ -3460,6 +2805,7 @@
 +	call *%edi
 +	jmp ret_from_exception
 +	CFI_ENDPROC
++KPROBE_END(page_fault)
 +
 +#ifdef CONFIG_XEN
 +# A note on the "critical region" in our callback handler.
@@ -3483,7 +2829,7 @@
 +	pushl %eax
 +	CFI_ADJUST_CFA_OFFSET 4
 +	SAVE_ALL
-+	movl EIP(%esp),%eax
++	movl PT_EIP(%esp),%eax
 +	cmpl $scrit,%eax
 +	jb   11f
 +	cmpl $ecrit,%eax
@@ -3492,14368 +2838,10832 @@
 +	jb   11f
 +	cmpl $sysexit_ecrit,%eax
 +	ja   11f
-+	# interrupted in sysexit critical
-+	addl $0x34,%esp			# Remove cs...ebx from stack frame.
-+	# this popped off new frame to reuse the old one, therefore no 
-+	# CFI_ADJUST_CFA_OFFSET here
-+11:	push %esp
-+	CFI_ADJUST_CFA_OFFSET 4
-+	call evtchn_do_upcall
-+	add  $4,%esp
-+	CFI_ADJUST_CFA_OFFSET -4
-+	jmp  ret_from_intr
-+
-+        ALIGN
-+restore_all_enable_events:
-+	__ENABLE_INTERRUPTS
-+scrit:	/**** START OF CRITICAL REGION ****/
-+	__TEST_PENDING
-+	jnz  14f			# process more events if necessary...
-+	RESTORE_REGS
-+	addl $4, %esp
-+	CFI_ADJUST_CFA_OFFSET -4
-+1:	iret
-+.section __ex_table,"a"
-+	.align 4
-+	.long 1b,iret_exc
-+.previous
-+14:	__DISABLE_INTERRUPTS
-+	jmp  11b
-+ecrit:  /**** END OF CRITICAL REGION ****/
-+# [How we do the fixup]. We want to merge the current stack frame with the
-+# just-interrupted frame. How we do this depends on where in the critical
-+# region the interrupted handler was executing, and so how many saved
-+# registers are in each frame. We do this quickly using the lookup table
-+# 'critical_fixup_table'. For each byte offset in the critical region, it
-+# provides the number of bytes which have already been popped from the
-+# interrupted stack frame.
-+critical_region_fixup:
-+	addl $critical_fixup_table-scrit,%eax
-+	movzbl (%eax),%eax		# %eax contains num bytes popped
-+	cmpb $0xff,%al                  # 0xff => vcpu_info critical region
-+	jne  15f
-+	GET_THREAD_INFO(%ebp)
-+        xorl %eax,%eax
-+15:	mov  %esp,%esi
-+	add  %eax,%esi			# %esi points at end of src region
-+	mov  %esp,%edi
-+	add  $0x34,%edi			# %edi points at end of dst region
-+	mov  %eax,%ecx
-+	shr  $2,%ecx			# convert words to bytes
-+	je   17f			# skip loop if nothing to copy
-+16:	subl $4,%esi			# pre-decrementing copy loop
-+	subl $4,%edi
-+	movl (%esi),%eax
-+	movl %eax,(%edi)
-+	loop 16b
-+17:	movl %edi,%esp			# final %edi is top of merged stack
-+	# this popped off new frame to reuse the old one, therefore no 
-+	# CFI_DEF_CFA_OFFSET here
-+	jmp  11b
-+	CFI_ENDPROC
-+
-+critical_fixup_table:
-+	.byte 0xff,0xff,0xff		# testb $0xff,(%esi) = __TEST_PENDING
-+	.byte 0xff,0xff			# jnz  14f
-+	.byte 0x00			# pop  %ebx
-+	.byte 0x04			# pop  %ecx
-+	.byte 0x08			# pop  %edx
-+	.byte 0x0c			# pop  %esi
-+	.byte 0x10			# pop  %edi
-+	.byte 0x14			# pop  %ebp
-+	.byte 0x18			# pop  %eax
-+	.byte 0x1c			# pop  %ds
-+	.byte 0x20			# pop  %es
-+	.byte 0x24,0x24,0x24		# add  $4,%esp
-+	.byte 0x28			# iret
-+	.byte 0xff,0xff,0xff,0xff	# movb $1,1(%esi)
-+	.byte 0x00,0x00			# jmp  11b
-+
-+# Hypervisor uses this for application faults while it executes.
-+# We get here for two reasons:
-+#  1. Fault while reloading DS, ES, FS or GS
-+#  2. Fault while executing IRET
-+# Category 1 we fix up by reattempting the load, and zeroing the segment
-+# register if the load fails.
-+# Category 2 we fix up by jumping to do_iret_error. We cannot use the
-+# normal Linux return path in this case because if we use the IRET hypercall
-+# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
-+# We distinguish between categories by maintaining a status value in EAX.
-+ENTRY(failsafe_callback)
-+	RING0_INT_FRAME
-+	pushl %eax
-+	CFI_ADJUST_CFA_OFFSET 4
-+	movl $1,%eax
-+1:	mov 4(%esp),%ds
-+2:	mov 8(%esp),%es
-+3:	mov 12(%esp),%fs
-+4:	mov 16(%esp),%gs
-+	testl %eax,%eax
-+	popl %eax
-+	CFI_ADJUST_CFA_OFFSET -4
-+	jz 5f
-+	addl $16,%esp		# EAX != 0 => Category 2 (Bad IRET)
-+	CFI_ADJUST_CFA_OFFSET -16
-+	jmp iret_exc
-+	CFI_ADJUST_CFA_OFFSET 16
-+5:	addl $16,%esp		# EAX == 0 => Category 1 (Bad segment)
-+	CFI_ADJUST_CFA_OFFSET -16
-+	pushl $0
-+	CFI_ADJUST_CFA_OFFSET 4
-+	SAVE_ALL
-+	jmp ret_from_exception
-+.section .fixup,"ax";		\
-+6:	xorl %eax,%eax;		\
-+	movl %eax,4(%esp);	\
-+	jmp 1b;			\
-+7:	xorl %eax,%eax;		\
-+	movl %eax,8(%esp);	\
-+	jmp 2b;			\
-+8:	xorl %eax,%eax;		\
-+	movl %eax,12(%esp);	\
-+	jmp 3b;			\
-+9:	xorl %eax,%eax;		\
-+	movl %eax,16(%esp);	\
-+	jmp 4b;			\
-+.previous;			\
-+.section __ex_table,"a";	\
-+	.align 4;		\
-+	.long 1b,6b;		\
-+	.long 2b,7b;		\
-+	.long 3b,8b;		\
-+	.long 4b,9b;		\
-+.previous
-+	CFI_ENDPROC
-+#endif
-+
-+ENTRY(coprocessor_error)
-+	RING0_INT_FRAME
-+	pushl $0
-+	CFI_ADJUST_CFA_OFFSET 4
-+	pushl $do_coprocessor_error
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+
-+ENTRY(simd_coprocessor_error)
-+	RING0_INT_FRAME
-+	pushl $0
-+	CFI_ADJUST_CFA_OFFSET 4
-+	pushl $do_simd_coprocessor_error
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+
-+ENTRY(device_not_available)
-+	RING0_INT_FRAME
-+	pushl $-1			# mark this as an int
-+	CFI_ADJUST_CFA_OFFSET 4
-+	SAVE_ALL
-+#ifndef CONFIG_XEN
-+	movl %cr0, %eax
-+	testl $0x4, %eax		# EM (math emulation bit)
-+	je device_available_emulate
-+	pushl $0			# temporary storage for ORIG_EIP
-+	CFI_ADJUST_CFA_OFFSET 4
-+	call math_emulate
-+	addl $4, %esp
-+	CFI_ADJUST_CFA_OFFSET -4
-+	jmp ret_from_exception
-+device_available_emulate:
-+#endif
-+	preempt_stop
-+	call math_state_restore
-+	jmp ret_from_exception
-+	CFI_ENDPROC
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Debug traps and NMI can happen at the one SYSENTER instruction
-+ * that sets up the real kernel stack. Check here, since we can't
-+ * allow the wrong stack to be used.
-+ *
-+ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
-+ * already pushed 3 words if it hits on the sysenter instruction:
-+ * eflags, cs and eip.
-+ *
-+ * We just load the right stack, and push the three (known) values
-+ * by hand onto the new stack - while updating the return eip past
-+ * the instruction that would have done it for sysenter.
-+ */
-+#define FIX_STACK(offset, ok, label)		\
-+	cmpw $__KERNEL_CS,4(%esp);		\
-+	jne ok;					\
-+label:						\
-+	movl SYSENTER_stack_esp0+offset(%esp),%esp;	\
-+	pushfl;					\
-+	pushl $__KERNEL_CS;			\
-+	pushl $sysenter_past_esp
-+#endif /* CONFIG_XEN */
-+
-+KPROBE_ENTRY(debug)
-+	RING0_INT_FRAME
-+#ifndef CONFIG_XEN
-+	cmpl $sysenter_entry,(%esp)
-+	jne debug_stack_correct
-+	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
-+debug_stack_correct:
-+#endif /* !CONFIG_XEN */
-+	pushl $-1			# mark this as an int
-+	CFI_ADJUST_CFA_OFFSET 4
-+	SAVE_ALL
-+	xorl %edx,%edx			# error code 0
-+	movl %esp,%eax			# pt_regs pointer
-+	call do_debug
-+	jmp ret_from_exception
-+	CFI_ENDPROC
-+	.previous .text
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * NMI is doubly nasty. It can happen _while_ we're handling
-+ * a debug fault, and the debug fault hasn't yet been able to
-+ * clear up the stack. So we first check whether we got  an
-+ * NMI on the sysenter entry path, but after that we need to
-+ * check whether we got an NMI on the debug path where the debug
-+ * fault happened on the sysenter path.
-+ */
-+ENTRY(nmi)
-+	RING0_INT_FRAME
-+	pushl %eax
-+	CFI_ADJUST_CFA_OFFSET 4
-+	movl %ss, %eax
-+	cmpw $__ESPFIX_SS, %ax
-+	popl %eax
-+	CFI_ADJUST_CFA_OFFSET -4
-+	je nmi_16bit_stack
-+	cmpl $sysenter_entry,(%esp)
-+	je nmi_stack_fixup
-+	pushl %eax
-+	CFI_ADJUST_CFA_OFFSET 4
-+	movl %esp,%eax
-+	/* Do not access memory above the end of our stack page,
-+	 * it might not exist.
-+	 */
-+	andl $(THREAD_SIZE-1),%eax
-+	cmpl $(THREAD_SIZE-20),%eax
-+	popl %eax
-+	CFI_ADJUST_CFA_OFFSET -4
-+	jae nmi_stack_correct
-+	cmpl $sysenter_entry,12(%esp)
-+	je nmi_debug_stack_check
-+nmi_stack_correct:
-+	pushl %eax
-+	CFI_ADJUST_CFA_OFFSET 4
-+	SAVE_ALL
-+	xorl %edx,%edx		# zero error code
-+	movl %esp,%eax		# pt_regs pointer
-+	call do_nmi
-+	jmp restore_nocheck_notrace
-+	CFI_ENDPROC
-+
-+nmi_stack_fixup:
-+	FIX_STACK(12,nmi_stack_correct, 1)
-+	jmp nmi_stack_correct
-+nmi_debug_stack_check:
-+	cmpw $__KERNEL_CS,16(%esp)
-+	jne nmi_stack_correct
-+	cmpl $debug,(%esp)
-+	jb nmi_stack_correct
-+	cmpl $debug_esp_fix_insn,(%esp)
-+	ja nmi_stack_correct
-+	FIX_STACK(24,nmi_stack_correct, 1)
-+	jmp nmi_stack_correct
-+
-+nmi_16bit_stack:
-+	RING0_INT_FRAME
-+	/* create the pointer to lss back */
-+	pushl %ss
-+	CFI_ADJUST_CFA_OFFSET 4
-+	pushl %esp
-+	CFI_ADJUST_CFA_OFFSET 4
-+	movzwl %sp, %esp
-+	addw $4, (%esp)
-+	/* copy the iret frame of 12 bytes */
-+	.rept 3
-+	pushl 16(%esp)
-+	CFI_ADJUST_CFA_OFFSET 4
-+	.endr
-+	pushl %eax
-+	CFI_ADJUST_CFA_OFFSET 4
-+	SAVE_ALL
-+	FIXUP_ESPFIX_STACK		# %eax == %esp
-+	CFI_ADJUST_CFA_OFFSET -20	# the frame has now moved
-+	xorl %edx,%edx			# zero error code
-+	call do_nmi
-+	RESTORE_REGS
-+	lss 12+4(%esp), %esp		# back to 16bit stack
-+1:	iret
-+	CFI_ENDPROC
-+.section __ex_table,"a"
-+	.align 4
-+	.long 1b,iret_exc
-+.previous
-+#else
-+ENTRY(nmi)
-+	RING0_INT_FRAME
-+	pushl %eax
-+	CFI_ADJUST_CFA_OFFSET 4
-+	SAVE_ALL
-+	xorl %edx,%edx		# zero error code
-+	movl %esp,%eax		# pt_regs pointer
-+	call do_nmi
-+	orl  $NMI_MASK, EFLAGS(%esp)
-+	jmp restore_all
-+	CFI_ENDPROC
-+#endif
-+
-+KPROBE_ENTRY(int3)
-+	RING0_INT_FRAME
-+	pushl $-1			# mark this as an int
-+	CFI_ADJUST_CFA_OFFSET 4
-+	SAVE_ALL
-+	xorl %edx,%edx		# zero error code
-+	movl %esp,%eax		# pt_regs pointer
-+	call do_int3
-+	jmp ret_from_exception
-+	CFI_ENDPROC
-+	.previous .text
-+
-+ENTRY(overflow)
-+	RING0_INT_FRAME
-+	pushl $0
-+	CFI_ADJUST_CFA_OFFSET 4
-+	pushl $do_overflow
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+
-+ENTRY(bounds)
-+	RING0_INT_FRAME
-+	pushl $0
-+	CFI_ADJUST_CFA_OFFSET 4
-+	pushl $do_bounds
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+
-+ENTRY(invalid_op)
-+	RING0_INT_FRAME
-+	pushl $0
-+	CFI_ADJUST_CFA_OFFSET 4
-+	pushl $do_invalid_op
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+
-+ENTRY(coprocessor_segment_overrun)
-+	RING0_INT_FRAME
-+	pushl $0
-+	CFI_ADJUST_CFA_OFFSET 4
-+	pushl $do_coprocessor_segment_overrun
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+
-+ENTRY(invalid_TSS)
-+	RING0_EC_FRAME
-+	pushl $do_invalid_TSS
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+
-+ENTRY(segment_not_present)
-+	RING0_EC_FRAME
-+	pushl $do_segment_not_present
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+
-+ENTRY(stack_segment)
-+	RING0_EC_FRAME
-+	pushl $do_stack_segment
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+
-+KPROBE_ENTRY(general_protection)
-+	RING0_EC_FRAME
-+	pushl $do_general_protection
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+	.previous .text
-+
-+ENTRY(alignment_check)
-+	RING0_EC_FRAME
-+	pushl $do_alignment_check
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+
-+KPROBE_ENTRY(page_fault)
-+	RING0_EC_FRAME
-+	pushl $do_page_fault
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+	.previous .text
-+
-+#ifdef CONFIG_X86_MCE
-+ENTRY(machine_check)
-+	RING0_INT_FRAME
-+	pushl $0
-+	CFI_ADJUST_CFA_OFFSET 4
-+	pushl machine_check_vector
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+#endif
-+
-+ENTRY(fixup_4gb_segment)
-+	RING0_INT_FRAME
-+	pushl $do_fixup_4gb_segment
-+	CFI_ADJUST_CFA_OFFSET 4
-+	jmp error_code
-+	CFI_ENDPROC
-+
-+#ifdef CONFIG_STACK_UNWIND
-+ENTRY(arch_unwind_init_running)
-+	CFI_STARTPROC
-+	movl	4(%esp), %edx
-+	movl	(%esp), %ecx
-+	leal	4(%esp), %eax
-+	movl	%ebx, EBX(%edx)
-+	xorl	%ebx, %ebx
-+	movl	%ebx, ECX(%edx)
-+	movl	%ebx, EDX(%edx)
-+	movl	%esi, ESI(%edx)
-+	movl	%edi, EDI(%edx)
-+	movl	%ebp, EBP(%edx)
-+	movl	%ebx, EAX(%edx)
-+	movl	$__USER_DS, DS(%edx)
-+	movl	$__USER_DS, ES(%edx)
-+	movl	%ebx, ORIG_EAX(%edx)
-+	movl	%ecx, EIP(%edx)
-+	movl	12(%esp), %ecx
-+	movl	$__KERNEL_CS, CS(%edx)
-+	movl	%ebx, EFLAGS(%edx)
-+	movl	%eax, OLDESP(%edx)
-+	movl	8(%esp), %eax
-+	movl	%ecx, 8(%esp)
-+	movl	EBX(%edx), %ebx
-+	movl	$__KERNEL_DS, OLDSS(%edx)
-+	jmpl	*%eax
-+	CFI_ENDPROC
-+ENDPROC(arch_unwind_init_running)
-+#endif
-+
-+.section .rodata,"a"
-+#include "syscall_table.S"
-+
-+syscall_table_size=(.-sys_call_table)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/fixup.c linux-2.6.18-xen/arch/i386/kernel/fixup.c
---- linux-2.6.18.3/arch/i386/kernel/fixup.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/fixup.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,92 @@
-+/******************************************************************************
-+ * fixup.c
-+ * 
-+ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
-+ * Used to avoid repeated slow emulation of common instructions used by the
-+ * user-space TLS (Thread-Local Storage) libraries.
-+ * 
-+ * **** NOTE ****
-+ *  Issues with the binary rewriting have caused it to be removed. Instead
-+ *  we rely on Xen's emulator to boot the kernel, and then print a banner
-+ *  message recommending that the user disable /lib/tls.
-+ * 
-+ * Copyright (c) 2004, K A Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ * 
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ * 
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/kernel.h>
-+#include <linux/delay.h>
-+#include <linux/version.h>
-+
-+#define DP(_f, _args...) printk(KERN_ALERT "  " _f "\n" , ## _args )
-+
-+fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
-+{
-+#if 0
-+	static unsigned long printed = 0;
-+	char info[100];
-+	int i;
-+
-+	if (test_and_set_bit(0, &printed))
-+		return;
-+
-+	HYPERVISOR_vm_assist(
-+		VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
-+
-+	sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
-+
-+
-+	DP("");
-+	DP("***************************************************************");
-+	DP("***************************************************************");
-+	DP("** WARNING: Currently emulating unsupported memory accesses  **");
-+	DP("**          in /lib/tls glibc libraries. The emulation is    **");
-+	DP("**          slow. To ensure full performance you should      **");
-+	DP("**          install a 'xen-friendly' (nosegneg) version of   **");
-+	DP("**          the library, or disable tls support by executing **");
-+	DP("**          the following as root:                           **");
-+	DP("**          mv /lib/tls /lib/tls.disabled                    **");
-+	DP("** Offending process: %-38.38s **", info);
-+	DP("***************************************************************");
-+	DP("***************************************************************");
-+	DP("");
-+
-+	for (i = 5; i > 0; i--) {
-+		touch_softlockup_watchdog();
-+		printk("Pausing... %d", i);
-+		mdelay(1000);
-+		printk("\b\b\b\b\b\b\b\b\b\b\b\b");
-+	}
-+
-+	printk("Continuing...\n\n");
-+#else
-+	if (printk_ratelimit())
-+		printk(KERN_WARNING
-+		       "4gb seg fixup, process %s (pid %d), cs:ip %02x:%08lx\n",
-+		       current->comm, current->tgid, regs->xcs, regs->eip);
-+#endif
-+}
-+
-+static int __init fixup_init(void)
-+{
-+	HYPERVISOR_vm_assist(
-+		VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
-+	return 0;
-+}
-+__initcall(fixup_init);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/head-xen.S linux-2.6.18-xen/arch/i386/kernel/head-xen.S
---- linux-2.6.18.3/arch/i386/kernel/head-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/head-xen.S	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,200 @@
-+
-+
-+.text
-+#include <linux/elfnote.h>
-+#include <linux/threads.h>
-+#include <linux/linkage.h>
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/thread_info.h>
-+#include <asm/asm-offsets.h>
-+#include <xen/interface/arch-x86_32.h>
-+#include <xen/interface/elfnote.h>
-+
-+/*
-+ * References to members of the new_cpu_data structure.
-+ */
-+
-+#define X86		new_cpu_data+CPUINFO_x86
-+#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
-+#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
-+#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
-+#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
-+#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
-+#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
-+#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id
-+
-+#define VIRT_ENTRY_OFFSET 0x0
-+.org VIRT_ENTRY_OFFSET
-+ENTRY(startup_32)
-+	movl %esi,xen_start_info
-+	cld
-+
-+	/* Set up the stack pointer */
-+	movl $(init_thread_union+THREAD_SIZE),%esp
-+
-+	/* get vendor info */
-+	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
-+	XEN_CPUID
-+	movl %eax,X86_CPUID		# save CPUID level
-+	movl %ebx,X86_VENDOR_ID		# lo 4 chars
-+	movl %edx,X86_VENDOR_ID+4	# next 4 chars
-+	movl %ecx,X86_VENDOR_ID+8	# last 4 chars
-+
-+	movl $1,%eax		# Use the CPUID instruction to get CPU type
-+	XEN_CPUID
-+	movb %al,%cl		# save reg for future use
-+	andb $0x0f,%ah		# mask processor family
-+	movb %ah,X86
-+	andb $0xf0,%al		# mask model
-+	shrb $4,%al
-+	movb %al,X86_MODEL
-+	andb $0x0f,%cl		# mask off the mask (stepping) revision
-+	movb %cl,X86_MASK
-+	movl %edx,X86_CAPABILITY
-+
-+	movb $1,X86_HARD_MATH
-+
-+	xorl %eax,%eax			# Clear FS/GS and LDT
-+	movl %eax,%fs
-+	movl %eax,%gs
-+	cld			# gcc2 wants the direction flag cleared at all times
-+
-+	call start_kernel
-+L6:
-+	jmp L6			# main should never return here, but
-+				# just in case, we know what happens.
-+
-+#define HYPERCALL_PAGE_OFFSET 0x1000
-+.org HYPERCALL_PAGE_OFFSET
-+ENTRY(hypercall_page)
-+.skip 0x1000
-+
-+/*
-+ * Real beginning of normal "text" segment
-+ */
-+ENTRY(stext)
-+ENTRY(_stext)
-+
-+/*
-+ * BSS section
-+ */
-+.section ".bss.page_aligned","w"
-+ENTRY(empty_zero_page)
-+	.fill 4096,1,0
-+
-+/*
-+ * This starts the data section.
-+ */
-+.data
-+
-+/*
-+ * The Global Descriptor Table contains 32 quadwords, per-CPU.
-+ */
-+ENTRY(cpu_gdt_table)
-+	.quad 0x0000000000000000	/* NULL descriptor */
-+	.quad 0x0000000000000000	/* 0x0b reserved */
-+	.quad 0x0000000000000000	/* 0x13 reserved */
-+	.quad 0x0000000000000000	/* 0x1b reserved */
-+	.quad 0x0000000000000000	/* 0x20 unused */
-+	.quad 0x0000000000000000	/* 0x28 unused */
-+	.quad 0x0000000000000000	/* 0x33 TLS entry 1 */
-+	.quad 0x0000000000000000	/* 0x3b TLS entry 2 */
-+	.quad 0x0000000000000000	/* 0x43 TLS entry 3 */
-+	.quad 0x0000000000000000	/* 0x4b reserved */
-+	.quad 0x0000000000000000	/* 0x53 reserved */
-+	.quad 0x0000000000000000	/* 0x5b reserved */
-+
-+	.quad 0x00cf9a000000ffff	/* 0x60 kernel 4GB code at 0x00000000 */
-+	.quad 0x00cf92000000ffff	/* 0x68 kernel 4GB data at 0x00000000 */
-+	.quad 0x00cffa000000ffff	/* 0x73 user 4GB code at 0x00000000 */
-+	.quad 0x00cff2000000ffff	/* 0x7b user 4GB data at 0x00000000 */
-+
-+	.quad 0x0000000000000000	/* 0x80 TSS descriptor */
-+	.quad 0x0000000000000000	/* 0x88 LDT descriptor */
-+
-+	/*
-+	 * Segments used for calling PnP BIOS have byte granularity.
-+	 * Their code segments and data segments have fixed 64k limits;
-+	 * the transfer segment sizes are set at run time.
-+	 */
-+	.quad 0x0000000000000000	/* 0x90 32-bit code */
-+	.quad 0x0000000000000000	/* 0x98 16-bit code */
-+	.quad 0x0000000000000000	/* 0xa0 16-bit data */
-+	.quad 0x0000000000000000	/* 0xa8 16-bit data */
-+	.quad 0x0000000000000000	/* 0xb0 16-bit data */
-+
-+	/*
-+	 * The APM segments have byte granularity and their bases
-+	 * are set at run time.  All have 64k limits.
-+	 */
-+	.quad 0x0000000000000000	/* 0xb8 APM CS    code */
-+	.quad 0x0000000000000000	/* 0xc0 APM CS 16 code (16 bit) */
-+	.quad 0x0000000000000000	/* 0xc8 APM DS    data */
-+
-+	.quad 0x0000000000000000	/* 0xd0 - ESPFIX 16-bit SS */
-+	.quad 0x0000000000000000	/* 0xd8 - unused */
-+	.quad 0x0000000000000000	/* 0xe0 - unused */
-+	.quad 0x0000000000000000	/* 0xe8 - unused */
-+	.quad 0x0000000000000000	/* 0xf0 - unused */
-+	.quad 0x0000000000000000	/* 0xf8 - GDT entry 31: double-fault TSS */
-+
-+#ifdef CONFIG_XEN_COMPAT_030002
-+/*
-+ * __xen_guest information
-+ */
-+.macro utoa value
-+ .if (\value) < 0 || (\value) >= 0x10
-+	utoa (((\value)>>4)&0x0fffffff)
-+ .endif
-+ .if ((\value) & 0xf) < 10
-+  .byte '0' + ((\value) & 0xf)
-+ .else
-+  .byte 'A' + ((\value) & 0xf) - 10
-+ .endif
-+.endm
-+
-+.section __xen_guest
-+	.ascii	"GUEST_OS=linux,GUEST_VER=2.6"
-+	.ascii	",XEN_VER=xen-3.0"
-+	.ascii	",VIRT_BASE=0x"
-+		utoa __PAGE_OFFSET
-+	.ascii	",ELF_PADDR_OFFSET=0x"
-+		utoa __PAGE_OFFSET
-+	.ascii	",VIRT_ENTRY=0x"
-+		utoa (__PAGE_OFFSET + __PHYSICAL_START + VIRT_ENTRY_OFFSET)
-+	.ascii	",HYPERCALL_PAGE=0x"
-+		utoa ((__PHYSICAL_START+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
-+	.ascii  ",FEATURES=writable_page_tables"
-+	.ascii	         "|writable_descriptor_tables"
-+	.ascii	         "|auto_translated_physmap"
-+	.ascii	         "|pae_pgdir_above_4gb"
-+	.ascii	         "|supervisor_mode_kernel"
-+#ifdef CONFIG_X86_PAE
-+	.ascii	",PAE=yes[extended-cr3]"
-+#else
-+	.ascii	",PAE=no"
-+#endif
-+	.ascii	",LOADER=generic"
-+	.byte	0
-+#endif /* CONFIG_XEN_COMPAT_030002 */
-+
-+
-+	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz, "linux")	
-+	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION,  .asciz, "2.6")
-+	ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION,    .asciz, "xen-3.0")
-+	ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE,      .long,  __PAGE_OFFSET)
-+#ifdef CONFIG_XEN_COMPAT_030002
-+	ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET,   .long,  __PAGE_OFFSET)
-+#else
-+	ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET,   .long,  0)
-+#endif /* !CONFIG_XEN_COMPAT_030002 */
-+	ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          .long,  startup_32)
-+	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long,  hypercall_page)
-+	ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,       .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
-+#ifdef CONFIG_X86_PAE
-+	ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE,       .asciz, "yes")
-+#else
-+	ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE,       .asciz, "no")
-+#endif
-+	ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz, "generic")
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/init_task-xen.c linux-2.6.18-xen/arch/i386/kernel/init_task-xen.c
---- linux-2.6.18.3/arch/i386/kernel/init_task-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/init_task-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,51 @@
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/init.h>
-+#include <linux/init_task.h>
-+#include <linux/fs.h>
-+#include <linux/mqueue.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/desc.h>
-+
-+static struct fs_struct init_fs = INIT_FS;
-+static struct files_struct init_files = INIT_FILES;
-+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-+
-+#define swapper_pg_dir ((pgd_t *)NULL)
-+struct mm_struct init_mm = INIT_MM(init_mm);
-+#undef swapper_pg_dir
-+
-+EXPORT_SYMBOL(init_mm);
-+
-+/*
-+ * Initial thread structure.
-+ *
-+ * We need to make sure that this is THREAD_SIZE aligned due to the
-+ * way process stacks are handled. This is done by having a special
-+ * "init_task" linker map entry.
-+ */
-+union thread_union init_thread_union 
-+	__attribute__((__section__(".data.init_task"))) =
-+		{ INIT_THREAD_INFO(init_task) };
-+
-+/*
-+ * Initial task structure.
-+ *
-+ * All other task structs will be allocated on slabs in fork.c
-+ */
-+struct task_struct init_task = INIT_TASK(init_task);
-+
-+EXPORT_SYMBOL(init_task);
-+
-+#ifndef CONFIG_X86_NO_TSS
-+/*
-+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
-+ * no more per-task TSS's.
-+ */ 
-+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
-+#endif
-+
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/io_apic-xen.c linux-2.6.18-xen/arch/i386/kernel/io_apic-xen.c
---- linux-2.6.18.3/arch/i386/kernel/io_apic-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/io_apic-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,2771 @@
-+/*
-+ *	Intel IO-APIC support for multi-Pentium hosts.
-+ *
-+ *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
-+ *
-+ *	Many thanks to Stig Venaas for trying out countless experimental
-+ *	patches and reporting/debugging problems patiently!
-+ *
-+ *	(c) 1999, Multiple IO-APIC support, developed by
-+ *	Ken-ichi Yaku <yaku at css1.kbnes.nec.co.jp> and
-+ *      Hidemi Kishimoto <kisimoto at css1.kbnes.nec.co.jp>,
-+ *	further tested and cleaned up by Zach Brown <zab at redhat.com>
-+ *	and Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Fixes
-+ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
-+ *					thanks to Eric Gilmore
-+ *					and Rolf G. Tews
-+ *					for testing these extensively
-+ *	Paul Diefenbaugh	:	Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/sched.h>
-+#include <linux/smp_lock.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/compiler.h>
-+#include <linux/acpi.h>
-+#include <linux/module.h>
-+#include <linux/sysdev.h>
-+
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/desc.h>
-+#include <asm/timer.h>
-+#include <asm/i8259.h>
-+#include <asm/nmi.h>
-+
-+#include <mach_apic.h>
-+
-+#include "io_ports.h"
-+
-+#ifdef CONFIG_XEN
-+
-+#include <xen/interface/xen.h>
-+#include <xen/interface/physdev.h>
-+
-+/* Fake i8259 */
-+#define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
-+#define disable_8259A_irq(_irq)  ((void)0)
-+#define i8259A_irq_pending(_irq) (0)
-+
-+unsigned long io_apic_irqs;
-+
-+static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
-+{
-+	struct physdev_apic apic_op;
-+	int ret;
-+
-+	apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
-+	apic_op.reg = reg;
-+	ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
-+	if (ret)
-+		return ret;
-+	return apic_op.value;
-+}
-+
-+static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-+{
-+	struct physdev_apic apic_op;
-+
-+	apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
-+	apic_op.reg = reg;
-+	apic_op.value = value;
-+	HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
-+}
-+
-+#define io_apic_read(a,r)    xen_io_apic_read(a,r)
-+#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
-+
-+#endif /* CONFIG_XEN */
-+
-+int (*ioapic_renumber_irq)(int ioapic, int irq);
-+atomic_t irq_mis_count;
-+
-+/* Where if anywhere is the i8259 connect in external int mode */
-+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
-+
-+static DEFINE_SPINLOCK(ioapic_lock);
-+static DEFINE_SPINLOCK(vector_lock);
-+
-+int timer_over_8254 __initdata = 1;
-+
-+/*
-+ *	Is the SiS APIC rmw bug present ?
-+ *	-1 = don't know, 0 = no, 1 = yes
-+ */
-+int sis_apic_bug = -1;
-+
-+/*
-+ * # of IRQ routing registers
-+ */
-+int nr_ioapic_registers[MAX_IO_APICS];
-+
-+int disable_timer_pin_1 __initdata;
-+
-+/*
-+ * Rough estimate of how many shared IRQs there are; this can
-+ * be changed anytime.
-+ */
-+#define MAX_PLUS_SHARED_IRQS NR_IRQS
-+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
-+
-+/*
-+ * This is performance-critical, we want to do it O(1)
-+ *
-+ * the indexing order of this array favors 1:1 mappings
-+ * between pins and IRQs.
-+ */
-+
-+static struct irq_pin_list {
-+	int apic, pin, next;
-+} irq_2_pin[PIN_MAP_SIZE];
-+
-+int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
-+#ifdef CONFIG_PCI_MSI
-+#define vector_to_irq(vector) 	\
-+	(platform_legacy_irq(vector) ? vector : vector_irq[vector])
-+#else
-+#define vector_to_irq(vector)	(vector)
-+#endif
-+
-+/*
-+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
-+ * shared ISA-space IRQs, so we have to support them. We are super
-+ * fast in the common case, and fast for shared ISA-space IRQs.
-+ */
-+static void add_pin_to_irq(unsigned int irq, int apic, int pin)
-+{
-+	static int first_free_entry = NR_IRQS;
-+	struct irq_pin_list *entry = irq_2_pin + irq;
-+
-+	while (entry->next)
-+		entry = irq_2_pin + entry->next;
-+
-+	if (entry->pin != -1) {
-+		entry->next = first_free_entry;
-+		entry = irq_2_pin + entry->next;
-+		if (++first_free_entry >= PIN_MAP_SIZE)
-+			panic("io_apic.c: whoops");
-+	}
-+	entry->apic = apic;
-+	entry->pin = pin;
-+}
-+
-+#ifdef CONFIG_XEN
-+#define clear_IO_APIC() ((void)0)
-+#else
-+/*
-+ * Reroute an IRQ to a different pin.
-+ */
-+static void __init replace_pin_at_irq(unsigned int irq,
-+				      int oldapic, int oldpin,
-+				      int newapic, int newpin)
-+{
-+	struct irq_pin_list *entry = irq_2_pin + irq;
-+
-+	while (1) {
-+		if (entry->apic == oldapic && entry->pin == oldpin) {
-+			entry->apic = newapic;
-+			entry->pin = newpin;
-+		}
-+		if (!entry->next)
-+			break;
-+		entry = irq_2_pin + entry->next;
-+	}
-+}
-+
-+static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
-+{
-+	struct irq_pin_list *entry = irq_2_pin + irq;
-+	unsigned int pin, reg;
-+
-+	for (;;) {
-+		pin = entry->pin;
-+		if (pin == -1)
-+			break;
-+		reg = io_apic_read(entry->apic, 0x10 + pin*2);
-+		reg &= ~disable;
-+		reg |= enable;
-+		io_apic_modify(entry->apic, 0x10 + pin*2, reg);
-+		if (!entry->next)
-+			break;
-+		entry = irq_2_pin + entry->next;
-+	}
-+}
-+
-+/* mask = 1 */
-+static void __mask_IO_APIC_irq (unsigned int irq)
-+{
-+	__modify_IO_APIC_irq(irq, 0x00010000, 0);
-+}
-+
-+/* mask = 0 */
-+static void __unmask_IO_APIC_irq (unsigned int irq)
-+{
-+	__modify_IO_APIC_irq(irq, 0, 0x00010000);
-+}
-+
-+/* mask = 1, trigger = 0 */
-+static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
-+{
-+	__modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
-+}
-+
-+/* mask = 0, trigger = 1 */
-+static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
-+{
-+	__modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
-+}
-+
-+static void mask_IO_APIC_irq (unsigned int irq)
-+{
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	__mask_IO_APIC_irq(irq);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void unmask_IO_APIC_irq (unsigned int irq)
-+{
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	__unmask_IO_APIC_irq(irq);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
-+{
-+	struct IO_APIC_route_entry entry;
-+	unsigned long flags;
-+	
-+	/* Check delivery_mode to be sure we're not clearing an SMI pin */
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	*(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+	*(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+	if (entry.delivery_mode == dest_SMI)
-+		return;
-+
-+	/*
-+	 * Disable it in the IO-APIC irq-routing table:
-+	 */
-+	memset(&entry, 0, sizeof(entry));
-+	entry.mask = 1;
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
-+	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void clear_IO_APIC (void)
-+{
-+	int apic, pin;
-+
-+	for (apic = 0; apic < nr_ioapics; apic++)
-+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
-+			clear_IO_APIC_pin(apic, pin);
-+}
-+
-+#ifdef CONFIG_SMP
-+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
-+{
-+	unsigned long flags;
-+	int pin;
-+	struct irq_pin_list *entry = irq_2_pin + irq;
-+	unsigned int apicid_value;
-+	cpumask_t tmp;
-+	
-+	cpus_and(tmp, cpumask, cpu_online_map);
-+	if (cpus_empty(tmp))
-+		tmp = TARGET_CPUS;
-+
-+	cpus_and(cpumask, tmp, CPU_MASK_ALL);
-+
-+	apicid_value = cpu_mask_to_apicid(cpumask);
-+	/* Prepare to do the io_apic_write */
-+	apicid_value = apicid_value << 24;
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	for (;;) {
-+		pin = entry->pin;
-+		if (pin == -1)
-+			break;
-+		io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
-+		if (!entry->next)
-+			break;
-+		entry = irq_2_pin + entry->next;
-+	}
-+	set_irq_info(irq, cpumask);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+#if defined(CONFIG_IRQBALANCE)
-+# include <asm/processor.h>	/* kernel_thread() */
-+# include <linux/kernel_stat.h>	/* kstat */
-+# include <linux/slab.h>		/* kmalloc() */
-+# include <linux/timer.h>	/* time_after() */
-+ 
-+#ifdef CONFIG_BALANCED_IRQ_DEBUG
-+#  define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
-+#  define Dprintk(x...) do { TDprintk(x); } while (0)
-+# else
-+#  define TDprintk(x...) 
-+#  define Dprintk(x...) 
-+# endif
-+
-+#define IRQBALANCE_CHECK_ARCH -999
-+#define MAX_BALANCED_IRQ_INTERVAL	(5*HZ)
-+#define MIN_BALANCED_IRQ_INTERVAL	(HZ/2)
-+#define BALANCED_IRQ_MORE_DELTA		(HZ/10)
-+#define BALANCED_IRQ_LESS_DELTA		(HZ)
-+
-+static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
-+static int physical_balance __read_mostly;
-+static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
-+
-+static struct irq_cpu_info {
-+	unsigned long * last_irq;
-+	unsigned long * irq_delta;
-+	unsigned long irq;
-+} irq_cpu_data[NR_CPUS];
-+
-+#define CPU_IRQ(cpu)		(irq_cpu_data[cpu].irq)
-+#define LAST_CPU_IRQ(cpu,irq)   (irq_cpu_data[cpu].last_irq[irq])
-+#define IRQ_DELTA(cpu,irq) 	(irq_cpu_data[cpu].irq_delta[irq])
-+
-+#define IDLE_ENOUGH(cpu,now) \
-+	(idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
-+
-+#define IRQ_ALLOWED(cpu, allowed_mask)	cpu_isset(cpu, allowed_mask)
-+
-+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
-+
-+static cpumask_t balance_irq_affinity[NR_IRQS] = {
-+	[0 ... NR_IRQS-1] = CPU_MASK_ALL
-+};
-+
-+void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
-+{
-+	balance_irq_affinity[irq] = mask;
-+}
-+
-+static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
-+			unsigned long now, int direction)
-+{
-+	int search_idle = 1;
-+	int cpu = curr_cpu;
-+
-+	goto inside;
-+
-+	do {
-+		if (unlikely(cpu == curr_cpu))
-+			search_idle = 0;
-+inside:
-+		if (direction == 1) {
-+			cpu++;
-+			if (cpu >= NR_CPUS)
-+				cpu = 0;
-+		} else {
-+			cpu--;
-+			if (cpu == -1)
-+				cpu = NR_CPUS-1;
-+		}
-+	} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
-+			(search_idle && !IDLE_ENOUGH(cpu,now)));
-+
-+	return cpu;
-+}
-+
-+static inline void balance_irq(int cpu, int irq)
-+{
-+	unsigned long now = jiffies;
-+	cpumask_t allowed_mask;
-+	unsigned int new_cpu;
-+		
-+	if (irqbalance_disabled)
-+		return; 
-+
-+	cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
-+	new_cpu = move(cpu, allowed_mask, now, 1);
-+	if (cpu != new_cpu) {
-+		set_pending_irq(irq, cpumask_of_cpu(new_cpu));
-+	}
-+}
-+
-+static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
-+{
-+	int i, j;
-+	Dprintk("Rotating IRQs among CPUs.\n");
-+	for_each_online_cpu(i) {
-+		for (j = 0; j < NR_IRQS; j++) {
-+			if (!irq_desc[j].action)
-+				continue;
-+			/* Is it a significant load ?  */
-+			if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
-+						useful_load_threshold)
-+				continue;
-+			balance_irq(i, j);
-+		}
-+	}
-+	balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
-+		balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);	
-+	return;
-+}
-+
-+static void do_irq_balance(void)
-+{
-+	int i, j;
-+	unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
-+	unsigned long move_this_load = 0;
-+	int max_loaded = 0, min_loaded = 0;
-+	int load;
-+	unsigned long useful_load_threshold = balanced_irq_interval + 10;
-+	int selected_irq;
-+	int tmp_loaded, first_attempt = 1;
-+	unsigned long tmp_cpu_irq;
-+	unsigned long imbalance = 0;
-+	cpumask_t allowed_mask, target_cpu_mask, tmp;
-+
-+	for_each_possible_cpu(i) {
-+		int package_index;
-+		CPU_IRQ(i) = 0;
-+		if (!cpu_online(i))
-+			continue;
-+		package_index = CPU_TO_PACKAGEINDEX(i);
-+		for (j = 0; j < NR_IRQS; j++) {
-+			unsigned long value_now, delta;
-+			/* Is this an active IRQ? */
-+			if (!irq_desc[j].action)
-+				continue;
-+			if ( package_index == i )
-+				IRQ_DELTA(package_index,j) = 0;
-+			/* Determine the total count per processor per IRQ */
-+			value_now = (unsigned long) kstat_cpu(i).irqs[j];
-+
-+			/* Determine the activity per processor per IRQ */
-+			delta = value_now - LAST_CPU_IRQ(i,j);
-+
-+			/* Update last_cpu_irq[][] for the next time */
-+			LAST_CPU_IRQ(i,j) = value_now;
-+
-+			/* Ignore IRQs whose rate is less than the clock */
-+			if (delta < useful_load_threshold)
-+				continue;
-+			/* update the load for the processor or package total */
-+			IRQ_DELTA(package_index,j) += delta;
-+
-+			/* Keep track of the higher numbered sibling as well */
-+			if (i != package_index)
-+				CPU_IRQ(i) += delta;
-+			/*
-+			 * We have sibling A and sibling B in the package
-+			 *
-+			 * cpu_irq[A] = load for cpu A + load for cpu B
-+			 * cpu_irq[B] = load for cpu B
-+			 */
-+			CPU_IRQ(package_index) += delta;
-+		}
-+	}
-+	/* Find the least loaded processor package */
-+	for_each_online_cpu(i) {
-+		if (i != CPU_TO_PACKAGEINDEX(i))
-+			continue;
-+		if (min_cpu_irq > CPU_IRQ(i)) {
-+			min_cpu_irq = CPU_IRQ(i);
-+			min_loaded = i;
-+		}
-+	}
-+	max_cpu_irq = ULONG_MAX;
-+
-+tryanothercpu:
-+	/* Look for heaviest loaded processor.
-+	 * We may come back to get the next heaviest loaded processor.
-+	 * Skip processors with trivial loads.
-+	 */
-+	tmp_cpu_irq = 0;
-+	tmp_loaded = -1;
-+	for_each_online_cpu(i) {
-+		if (i != CPU_TO_PACKAGEINDEX(i))
-+			continue;
-+		if (max_cpu_irq <= CPU_IRQ(i)) 
-+			continue;
-+		if (tmp_cpu_irq < CPU_IRQ(i)) {
-+			tmp_cpu_irq = CPU_IRQ(i);
-+			tmp_loaded = i;
-+		}
-+	}
-+
-+	if (tmp_loaded == -1) {
-+	 /* In the case of a small number of heavy interrupt sources,
-+	  * some of the cpus end up loaded too much. We use Ingo's
-+	  * original approach and rotate the IRQs around.
-+	  */
-+		if (!first_attempt && imbalance >= useful_load_threshold) {
-+			rotate_irqs_among_cpus(useful_load_threshold);
-+			return;
-+		}
-+		goto not_worth_the_effort;
-+	}
-+	
-+	first_attempt = 0;		/* heaviest search */
-+	max_cpu_irq = tmp_cpu_irq;	/* load */
-+	max_loaded = tmp_loaded;	/* processor */
-+	imbalance = (max_cpu_irq - min_cpu_irq) / 2;
-+	
-+	Dprintk("max_loaded cpu = %d\n", max_loaded);
-+	Dprintk("min_loaded cpu = %d\n", min_loaded);
-+	Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
-+	Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
-+	Dprintk("load imbalance = %lu\n", imbalance);
-+
-+	/* If the imbalance is less than roughly 1/8 of the max load,
-+	 * we are into diminishing returns, so quit.
-+	 */
-+	if (imbalance < (max_cpu_irq >> 3)) {
-+		Dprintk("Imbalance too trivial\n");
-+		goto not_worth_the_effort;
-+	}
-+
-+tryanotherirq:
-+	/* if we select an IRQ to move that can't go where we want, then
-+	 * see if there is another one to try.
-+	 */
-+	move_this_load = 0;
-+	selected_irq = -1;
-+	for (j = 0; j < NR_IRQS; j++) {
-+		/* Is this an active IRQ? */
-+		if (!irq_desc[j].action)
-+			continue;
-+		if (imbalance <= IRQ_DELTA(max_loaded,j))
-+			continue;
-+		/* Try to find the IRQ that is closest to the imbalance
-+		 * without going over.
-+		 */
-+		if (move_this_load < IRQ_DELTA(max_loaded,j)) {
-+			move_this_load = IRQ_DELTA(max_loaded,j);
-+			selected_irq = j;
-+		}
-+	}
-+	if (selected_irq == -1) {
-+		goto tryanothercpu;
-+	}
-+
-+	imbalance = move_this_load;
-+	
-+	/* For the physical_balance case, we accumulated both load
-+	 * values in one of the siblings' cpu_irq[], so that the same
-+	 * code can be used for physical and logical processors
-+	 * as much as possible.
-+	 *
-+	 * NOTE: the cpu_irq[] array holds the sum of the load for
-+	 * sibling A and sibling B in the slot for the lowest numbered
-+	 * sibling (A), _AND_ the load for sibling B in the slot for
-+	 * the higher numbered sibling.
-+	 *
-+	 * We seek the least loaded sibling by making the comparison
-+	 * (A+B)/2 vs B
-+	 */
-+	load = CPU_IRQ(min_loaded) >> 1;
-+	for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
-+		if (load > CPU_IRQ(j)) {
-+			/* This won't change cpu_sibling_map[min_loaded] */
-+			load = CPU_IRQ(j);
-+			min_loaded = j;
-+		}
-+	}
-+
-+	cpus_and(allowed_mask,
-+		 cpu_online_map,
-+		 balance_irq_affinity[selected_irq]);
-+	target_cpu_mask = cpumask_of_cpu(min_loaded);
-+	cpus_and(tmp, target_cpu_mask, allowed_mask);
-+
-+	if (!cpus_empty(tmp)) {
-+
-+		Dprintk("irq = %d moved to cpu = %d\n",
-+				selected_irq, min_loaded);
-+		/* mark for change destination */
-+		set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
-+
-+		/* Since we made a change, come back sooner to 
-+		 * check for more variation.
-+		 */
-+		balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
-+			balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);	
-+		return;
-+	}
-+	goto tryanotherirq;
-+
-+not_worth_the_effort:
-+	/*
-+	 * if we did not find an IRQ to move, then adjust the time interval
-+	 * upward
-+	 */
-+	balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
-+		balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);	
-+	Dprintk("IRQ worth rotating not found\n");
-+	return;
-+}
-+
-+static int balanced_irq(void *unused)
-+{
-+	int i;
-+	unsigned long prev_balance_time = jiffies;
-+	long time_remaining = balanced_irq_interval;
-+
-+	daemonize("kirqd");
-+	
-+	/* push everything to CPU 0 to give us a starting point.  */
-+	for (i = 0 ; i < NR_IRQS ; i++) {
-+		irq_desc[i].pending_mask[i] = cpumask_of_cpu(0);
-+		set_pending_irq(i, cpumask_of_cpu(0));
-+	}
-+
-+	for ( ; ; ) {
-+		time_remaining = schedule_timeout_interruptible(time_remaining);
-+		try_to_freeze();
-+		if (time_after(jiffies,
-+				prev_balance_time+balanced_irq_interval)) {
-+			preempt_disable();
-+			do_irq_balance();
-+			prev_balance_time = jiffies;
-+			time_remaining = balanced_irq_interval;
-+			preempt_enable();
-+		}
-+	}
-+	return 0;
-+}
-+
-+static int __init balanced_irq_init(void)
-+{
-+	int i;
-+	struct cpuinfo_x86 *c;
-+	cpumask_t tmp;
-+
-+	cpus_shift_right(tmp, cpu_online_map, 2);
-+        c = &boot_cpu_data;
-+	/* When not overridden on the command line, ask the subarchitecture. */
-+	if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
-+		irqbalance_disabled = NO_BALANCE_IRQ;
-+	if (irqbalance_disabled)
-+		return 0;
-+	
-+	 /* disable irqbalance completely if there is only one processor online */
-+	if (num_online_cpus() < 2) {
-+		irqbalance_disabled = 1;
-+		return 0;
-+	}
-+	/*
-+	 * Enable physical balance only if more than 1 physical processor
-+	 * is present
-+	 */
-+	if (smp_num_siblings > 1 && !cpus_empty(tmp))
-+		physical_balance = 1;
-+
-+	for_each_online_cpu(i) {
-+		irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
-+		irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
-+		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
-+			printk(KERN_ERR "balanced_irq_init: out of memory");
-+			goto failed;
-+		}
-+		memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
-+		memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
-+	}
-+	
-+	printk(KERN_INFO "Starting balanced_irq\n");
-+	if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0) 
-+		return 0;
-+	else 
-+		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
-+failed:
-+	for_each_possible_cpu(i) {
-+		kfree(irq_cpu_data[i].irq_delta);
-+		irq_cpu_data[i].irq_delta = NULL;
-+		kfree(irq_cpu_data[i].last_irq);
-+		irq_cpu_data[i].last_irq = NULL;
-+	}
-+	return 0;
-+}
-+
-+int __init irqbalance_disable(char *str)
-+{
-+	irqbalance_disabled = 1;
-+	return 1;
-+}
-+
-+__setup("noirqbalance", irqbalance_disable);
-+
-+late_initcall(balanced_irq_init);
-+#endif /* CONFIG_IRQBALANCE */
-+#endif /* CONFIG_SMP */
-+#endif
-+
-+#ifndef CONFIG_SMP
-+void fastcall send_IPI_self(int vector)
-+{
-+#ifndef CONFIG_XEN
-+	unsigned int cfg;
-+
-+	/*
-+	 * Wait for idle.
-+	 */
-+	apic_wait_icr_idle();
-+	cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
-+	/*
-+	 * Send the IPI. The write to APIC_ICR fires this off.
-+	 */
-+	apic_write_around(APIC_ICR, cfg);
-+#endif
-+}
-+#endif /* !CONFIG_SMP */
-+
-+
-+/*
-+ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
-+ * specific CPU-side IRQs.
-+ */
-+
-+#define MAX_PIRQS 8
-+static int pirq_entries [MAX_PIRQS];
-+static int pirqs_enabled;
-+int skip_ioapic_setup;
-+
-+static int __init ioapic_setup(char *str)
-+{
-+	skip_ioapic_setup = 1;
-+	return 1;
-+}
-+
-+__setup("noapic", ioapic_setup);
-+
-+static int __init ioapic_pirq_setup(char *str)
-+{
-+	int i, max;
-+	int ints[MAX_PIRQS+1];
-+
-+	get_options(str, ARRAY_SIZE(ints), ints);
-+
-+	for (i = 0; i < MAX_PIRQS; i++)
-+		pirq_entries[i] = -1;
-+
-+	pirqs_enabled = 1;
-+	apic_printk(APIC_VERBOSE, KERN_INFO
-+			"PIRQ redirection, working around broken MP-BIOS.\n");
-+	max = MAX_PIRQS;
-+	if (ints[0] < MAX_PIRQS)
-+		max = ints[0];
-+
-+	for (i = 0; i < max; i++) {
-+		apic_printk(APIC_VERBOSE, KERN_DEBUG
-+				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
-+		/*
-+		 * PIRQs are mapped upside down, usually.
-+		 */
-+		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
-+	}
-+	return 1;
-+}
-+
-+__setup("pirq=", ioapic_pirq_setup);
-+
-+/*
-+ * Find the IRQ entry number of a certain pin.
-+ */
-+static int find_irq_entry(int apic, int pin, int type)
-+{
-+	int i;
-+
-+	for (i = 0; i < mp_irq_entries; i++)
-+		if (mp_irqs[i].mpc_irqtype == type &&
-+		    (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
-+		     mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
-+		    mp_irqs[i].mpc_dstirq == pin)
-+			return i;
-+
-+	return -1;
-+}
-+
-+/*
-+ * Find the pin to which IRQ[irq] (ISA) is connected
-+ */
-+static int __init find_isa_irq_pin(int irq, int type)
-+{
-+	int i;
-+
-+	for (i = 0; i < mp_irq_entries; i++) {
-+		int lbus = mp_irqs[i].mpc_srcbus;
-+
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_NEC98
-+		    ) &&
-+		    (mp_irqs[i].mpc_irqtype == type) &&
-+		    (mp_irqs[i].mpc_srcbusirq == irq))
-+
-+			return mp_irqs[i].mpc_dstirq;
-+	}
-+	return -1;
-+}
-+
-+static int __init find_isa_irq_apic(int irq, int type)
-+{
-+	int i;
-+
-+	for (i = 0; i < mp_irq_entries; i++) {
-+		int lbus = mp_irqs[i].mpc_srcbus;
-+
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_NEC98
-+		    ) &&
-+		    (mp_irqs[i].mpc_irqtype == type) &&
-+		    (mp_irqs[i].mpc_srcbusirq == irq))
-+			break;
-+	}
-+	if (i < mp_irq_entries) {
-+		int apic;
-+		for(apic = 0; apic < nr_ioapics; apic++) {
-+			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
-+				return apic;
-+		}
-+	}
-+
-+	return -1;
-+}
-+
-+/*
-+ * Find a specific PCI IRQ entry.
-+ * Not an __init, possibly needed by modules
-+ */
-+static int pin_2_irq(int idx, int apic, int pin);
-+
-+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
-+{
-+	int apic, i, best_guess = -1;
-+
-+	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
-+		"slot:%d, pin:%d.\n", bus, slot, pin);
-+	if (mp_bus_id_to_pci_bus[bus] == -1) {
-+		printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
-+		return -1;
-+	}
-+	for (i = 0; i < mp_irq_entries; i++) {
-+		int lbus = mp_irqs[i].mpc_srcbus;
-+
-+		for (apic = 0; apic < nr_ioapics; apic++)
-+			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
-+			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
-+				break;
-+
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
-+		    !mp_irqs[i].mpc_irqtype &&
-+		    (bus == lbus) &&
-+		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
-+			int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
-+
-+			if (!(apic || IO_APIC_IRQ(irq)))
-+				continue;
-+
-+			if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
-+				return irq;
-+			/*
-+			 * Use the first all-but-pin matching entry as a
-+			 * best-guess fuzzy result for broken mptables.
-+			 */
-+			if (best_guess < 0)
-+				best_guess = irq;
-+		}
-+	}
-+	return best_guess;
-+}
-+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-+
-+/*
-+ * This function is currently only a helper for the i386 SMP boot process, where
-+ * we need to reprogram the ioredtbls to cater for the cpus which have come online,
-+ * so the mask in all cases should simply be TARGET_CPUS.
-+ */
-+#ifdef CONFIG_SMP
-+#ifndef CONFIG_XEN
-+void __init setup_ioapic_dest(void)
-+{
-+	int pin, ioapic, irq, irq_entry;
-+
-+	if (skip_ioapic_setup == 1)
-+		return;
-+
-+	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
-+		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
-+			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
-+			if (irq_entry == -1)
-+				continue;
-+			irq = pin_2_irq(irq_entry, ioapic, pin);
-+			set_ioapic_affinity_irq(irq, TARGET_CPUS);
-+		}
-+
-+	}
-+}
-+#endif /* !CONFIG_XEN */
-+#endif
-+
-+/*
-+ * EISA Edge/Level control register, ELCR
-+ */
-+static int EISA_ELCR(unsigned int irq)
-+{
-+	if (irq < 16) {
-+		unsigned int port = 0x4d0 + (irq >> 3);
-+		return (inb(port) >> (irq & 7)) & 1;
-+	}
-+	apic_printk(APIC_VERBOSE, KERN_INFO
-+			"Broken MPtable reports ISA irq %d\n", irq);
-+	return 0;
-+}
-+
-+/* EISA interrupts are always polarity zero and can be edge or level
-+ * trigger depending on the ELCR value.  If an interrupt is listed as
-+ * EISA conforming in the MP table, that means its trigger type must
-+ * be read in from the ELCR */
-+
-+#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
-+#define default_EISA_polarity(idx)	(0)
-+
-+/* ISA interrupts are always polarity zero edge triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_ISA_trigger(idx)	(0)
-+#define default_ISA_polarity(idx)	(0)
-+
-+/* PCI interrupts are always polarity one level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_PCI_trigger(idx)	(1)
-+#define default_PCI_polarity(idx)	(1)
-+
-+/* MCA interrupts are always polarity zero level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_MCA_trigger(idx)	(1)
-+#define default_MCA_polarity(idx)	(0)
-+
-+/* NEC98 interrupts are always polarity zero edge triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_NEC98_trigger(idx)     (0)
-+#define default_NEC98_polarity(idx)    (0)
-+
-+static int __init MPBIOS_polarity(int idx)
-+{
-+	int bus = mp_irqs[idx].mpc_srcbus;
-+	int polarity;
-+
-+	/*
-+	 * Determine IRQ line polarity (high active or low active):
-+	 */
-+	switch (mp_irqs[idx].mpc_irqflag & 3)
-+	{
-+		case 0: /* conforms, ie. bus-type dependent polarity */
-+		{
-+			switch (mp_bus_id_to_type[bus])
-+			{
-+				case MP_BUS_ISA: /* ISA pin */
-+				{
-+					polarity = default_ISA_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_EISA: /* EISA pin */
-+				{
-+					polarity = default_EISA_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_PCI: /* PCI pin */
-+				{
-+					polarity = default_PCI_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_MCA: /* MCA pin */
-+				{
-+					polarity = default_MCA_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_NEC98: /* NEC 98 pin */
-+				{
-+					polarity = default_NEC98_polarity(idx);
-+					break;
-+				}
-+				default:
-+				{
-+					printk(KERN_WARNING "broken BIOS!!\n");
-+					polarity = 1;
-+					break;
-+				}
-+			}
-+			break;
-+		}
-+		case 1: /* high active */
-+		{
-+			polarity = 0;
-+			break;
-+		}
-+		case 2: /* reserved */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			polarity = 1;
-+			break;
-+		}
-+		case 3: /* low active */
-+		{
-+			polarity = 1;
-+			break;
-+		}
-+		default: /* invalid */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			polarity = 1;
-+			break;
-+		}
-+	}
-+	return polarity;
-+}
-+
-+static int MPBIOS_trigger(int idx)
-+{
-+	int bus = mp_irqs[idx].mpc_srcbus;
-+	int trigger;
-+
-+	/*
-+	 * Determine IRQ trigger mode (edge or level sensitive):
-+	 */
-+	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
-+	{
-+		case 0: /* conforms, ie. bus-type dependent */
-+		{
-+			switch (mp_bus_id_to_type[bus])
-+			{
-+				case MP_BUS_ISA: /* ISA pin */
-+				{
-+					trigger = default_ISA_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_EISA: /* EISA pin */
-+				{
-+					trigger = default_EISA_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_PCI: /* PCI pin */
-+				{
-+					trigger = default_PCI_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_MCA: /* MCA pin */
-+				{
-+					trigger = default_MCA_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_NEC98: /* NEC 98 pin */
-+				{
-+					trigger = default_NEC98_trigger(idx);
-+					break;
-+				}
-+				default:
-+				{
-+					printk(KERN_WARNING "broken BIOS!!\n");
-+					trigger = 1;
-+					break;
-+				}
-+			}
-+			break;
-+		}
-+		case 1: /* edge */
-+		{
-+			trigger = 0;
-+			break;
-+		}
-+		case 2: /* reserved */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			trigger = 1;
-+			break;
-+		}
-+		case 3: /* level */
-+		{
-+			trigger = 1;
-+			break;
-+		}
-+		default: /* invalid */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			trigger = 0;
-+			break;
-+		}
-+	}
-+	return trigger;
-+}
-+
-+static inline int irq_polarity(int idx)
-+{
-+	return MPBIOS_polarity(idx);
-+}
-+
-+static inline int irq_trigger(int idx)
-+{
-+	return MPBIOS_trigger(idx);
-+}
-+
-+static int pin_2_irq(int idx, int apic, int pin)
-+{
-+	int irq, i;
-+	int bus = mp_irqs[idx].mpc_srcbus;
-+
-+	/*
-+	 * Debugging check; we are in big trouble if this message pops up!
-+	 */
-+	if (mp_irqs[idx].mpc_dstirq != pin)
-+		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
-+
-+	switch (mp_bus_id_to_type[bus])
-+	{
-+		case MP_BUS_ISA: /* ISA pin */
-+		case MP_BUS_EISA:
-+		case MP_BUS_MCA:
-+		case MP_BUS_NEC98:
-+		{
-+			irq = mp_irqs[idx].mpc_srcbusirq;
-+			break;
-+		}
-+		case MP_BUS_PCI: /* PCI pin */
-+		{
-+			/*
-+			 * PCI IRQs are mapped in order
-+			 */
-+			i = irq = 0;
-+			while (i < apic)
-+				irq += nr_ioapic_registers[i++];
-+			irq += pin;
-+
-+			/*
-+			 * For MPS mode, so far only needed by ES7000 platform
-+			 */
-+			if (ioapic_renumber_irq)
-+				irq = ioapic_renumber_irq(apic, irq);
-+
-+			break;
-+		}
-+		default:
-+		{
-+			printk(KERN_ERR "unknown bus type %d.\n",bus); 
-+			irq = 0;
-+			break;
-+		}
-+	}
-+
-+	/*
-+	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
-+	 */
-+	if ((pin >= 16) && (pin <= 23)) {
-+		if (pirq_entries[pin-16] != -1) {
-+			if (!pirq_entries[pin-16]) {
-+				apic_printk(APIC_VERBOSE, KERN_DEBUG
-+						"disabling PIRQ%d\n", pin-16);
-+			} else {
-+				irq = pirq_entries[pin-16];
-+				apic_printk(APIC_VERBOSE, KERN_DEBUG
-+						"using PIRQ%d -> IRQ %d\n",
-+						pin-16, irq);
-+			}
-+		}
-+	}
-+	return irq;
-+}
-+
-+static inline int IO_APIC_irq_trigger(int irq)
-+{
-+	int apic, idx, pin;
-+
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+			idx = find_irq_entry(apic,pin,mp_INT);
-+			if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
-+				return irq_trigger(idx);
-+		}
-+	}
-+	/*
-+	 * nonexistent IRQs default to edge triggering
-+	 */
-+	return 0;
-+}
-+
-+/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-+u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
-+
-+int assign_irq_vector(int irq)
-+{
-+	struct physdev_irq irq_op;
-+	unsigned long flags;
-+
-+	BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
-+
-+	spin_lock_irqsave(&vector_lock, flags);
-+
-+	if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
-+		spin_unlock_irqrestore(&vector_lock, flags);
-+		return IO_APIC_VECTOR(irq);
-+	}
-+	irq_op.irq = irq;
-+	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
-+		return -ENOSPC;
-+
-+	vector_irq[irq_op.vector] = irq;
-+	if (irq != AUTO_ASSIGN)
-+		IO_APIC_VECTOR(irq) = irq_op.vector;
-+
-+	spin_unlock_irqrestore(&vector_lock, flags);
-+
-+	return irq_op.vector;
-+}
-+
-+#ifndef CONFIG_XEN
-+static struct hw_interrupt_type ioapic_level_type;
-+static struct hw_interrupt_type ioapic_edge_type;
-+
-+#define IOAPIC_AUTO	-1
-+#define IOAPIC_EDGE	0
-+#define IOAPIC_LEVEL	1
-+
-+static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
-+{
-+	unsigned idx;
-+
-+	idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
-+
-+	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-+			trigger == IOAPIC_LEVEL)
-+		irq_desc[idx].chip = &ioapic_level_type;
-+	else
-+		irq_desc[idx].chip = &ioapic_edge_type;
-+	set_intr_gate(vector, interrupt[idx]);
-+}
-+#else
-+#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
-+#endif
-+
-+static void __init setup_IO_APIC_irqs(void)
-+{
-+	struct IO_APIC_route_entry entry;
-+	int apic, pin, idx, irq, first_notcon = 1, vector;
-+	unsigned long flags;
-+
-+	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
-+
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+
-+		/*
-+		 * add it to the IO-APIC irq-routing table:
-+		 */
-+		memset(&entry,0,sizeof(entry));
-+
-+		entry.delivery_mode = INT_DELIVERY_MODE;
-+		entry.dest_mode = INT_DEST_MODE;
-+		entry.mask = 0;				/* enable IRQ */
-+		entry.dest.logical.logical_dest = 
-+					cpu_mask_to_apicid(TARGET_CPUS);
-+
-+		idx = find_irq_entry(apic,pin,mp_INT);
-+		if (idx == -1) {
-+			if (first_notcon) {
-+				apic_printk(APIC_VERBOSE, KERN_DEBUG
-+						" IO-APIC (apicid-pin) %d-%d",
-+						mp_ioapics[apic].mpc_apicid,
-+						pin);
-+				first_notcon = 0;
-+			} else
-+				apic_printk(APIC_VERBOSE, ", %d-%d",
-+					mp_ioapics[apic].mpc_apicid, pin);
-+			continue;
-+		}
-+
-+		entry.trigger = irq_trigger(idx);
-+		entry.polarity = irq_polarity(idx);
-+
-+		if (irq_trigger(idx)) {
-+			entry.trigger = 1;
-+			entry.mask = 1;
-+		}
-+
-+		irq = pin_2_irq(idx, apic, pin);
-+		/*
-+		 * skip adding the timer int on secondary nodes, which causes
-+		 * a small but painful rift in the time-space continuum
-+		 */
-+		if (multi_timer_check(apic, irq))
-+			continue;
-+		else
-+			add_pin_to_irq(irq, apic, pin);
-+
-+		if (/*!apic &&*/ !IO_APIC_IRQ(irq))
-+			continue;
-+
-+		if (IO_APIC_IRQ(irq)) {
-+			vector = assign_irq_vector(irq);
-+			entry.vector = vector;
-+			ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-+		
-+			if (!apic && (irq < 16))
-+				disable_8259A_irq(irq);
-+		}
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-+		io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-+		set_native_irq_info(irq, TARGET_CPUS);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+	}
-+	}
-+
-+	if (!first_notcon)
-+		apic_printk(APIC_VERBOSE, " not connected.\n");
-+}
-+
-+/*
-+ * Set up the 8259A-master output pin:
-+ */
-+#ifndef CONFIG_XEN
-+static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
-+{
-+	struct IO_APIC_route_entry entry;
-+	unsigned long flags;
-+
-+	memset(&entry,0,sizeof(entry));
-+
-+	disable_8259A_irq(0);
-+
-+	/* mask LVT0 */
-+	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+
-+	/*
-+	 * We use logical delivery to get the timer IRQ
-+	 * to the first CPU.
-+	 */
-+	entry.dest_mode = INT_DEST_MODE;
-+	entry.mask = 0;					/* unmask IRQ now */
-+	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+	entry.delivery_mode = INT_DELIVERY_MODE;
-+	entry.polarity = 0;
-+	entry.trigger = 0;
-+	entry.vector = vector;
-+
-+	/*
-+	 * The timer IRQ doesn't have to know that behind the
-+	 * scene we have a 8259A-master in AEOI mode ...
-+	 */
-+	irq_desc[0].chip = &ioapic_edge_type;
-+
-+	/*
-+	 * Add it to the IO-APIC irq-routing table:
-+	 */
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-+	io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	enable_8259A_irq(0);
-+}
-+
-+static inline void UNEXPECTED_IO_APIC(void)
-+{
-+}
-+
-+void __init print_IO_APIC(void)
-+{
-+	int apic, i;
-+	union IO_APIC_reg_00 reg_00;
-+	union IO_APIC_reg_01 reg_01;
-+	union IO_APIC_reg_02 reg_02;
-+	union IO_APIC_reg_03 reg_03;
-+	unsigned long flags;
-+
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
-+
-+ 	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
-+	for (i = 0; i < nr_ioapics; i++)
-+		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-+		       mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
-+
-+	/*
-+	 * We are a bit conservative about what we expect. We have to
-+	 * know about every hardware change ASAP.
-+	 */
-+	printk(KERN_INFO "testing the IO APIC.......................\n");
-+
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_00.raw = io_apic_read(apic, 0);
-+	reg_01.raw = io_apic_read(apic, 1);
-+	if (reg_01.bits.version >= 0x10)
-+		reg_02.raw = io_apic_read(apic, 2);
-+	if (reg_01.bits.version >= 0x20)
-+		reg_03.raw = io_apic_read(apic, 3);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
-+	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
-+	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
-+	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
-+	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);
-+	if (reg_00.bits.ID >= get_physical_broadcast())
-+		UNEXPECTED_IO_APIC();
-+	if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
-+		UNEXPECTED_IO_APIC();
-+
-+	printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
-+	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
-+	if (	(reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
-+		(reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
-+		(reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
-+		(reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
-+		(reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
-+		(reg_01.bits.entries != 0x2E) &&
-+		(reg_01.bits.entries != 0x3F)
-+	)
-+		UNEXPECTED_IO_APIC();
-+
-+	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
-+	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
-+	if (	(reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
-+		(reg_01.bits.version != 0x10) && /* oldest IO-APICs */
-+		(reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
-+		(reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
-+		(reg_01.bits.version != 0x20)    /* Intel P64H (82806 AA) */
-+	)
-+		UNEXPECTED_IO_APIC();
-+	if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
-+		UNEXPECTED_IO_APIC();
-+
-+	/*
-+	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
-+	 * but the value of reg_02 is read as the previous read register
-+	 * value, so ignore it if reg_02 == reg_01.
-+	 */
-+	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
-+		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
-+		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
-+		if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
-+			UNEXPECTED_IO_APIC();
-+	}
-+
-+	/*
-+	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
-+	 * or reg_03, but the value of reg_0[23] is read as the previous read
-+	 * register value, so ignore it if reg_03 == reg_0[12].
-+	 */
-+	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
-+	    reg_03.raw != reg_01.raw) {
-+		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
-+		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
-+		if (reg_03.bits.__reserved_1)
-+			UNEXPECTED_IO_APIC();
-+	}
-+
-+	printk(KERN_DEBUG ".... IRQ redirection table:\n");
-+
-+	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
-+			  " Stat Dest Deli Vect:   \n");
-+
-+	for (i = 0; i <= reg_01.bits.entries; i++) {
-+		struct IO_APIC_route_entry entry;
-+
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		*(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
-+		*(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+		printk(KERN_DEBUG " %02x %03X %02X  ",
-+			i,
-+			entry.dest.logical.logical_dest,
-+			entry.dest.physical.physical_dest
-+		);
-+
-+		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
-+			entry.mask,
-+			entry.trigger,
-+			entry.irr,
-+			entry.polarity,
-+			entry.delivery_status,
-+			entry.dest_mode,
-+			entry.delivery_mode,
-+			entry.vector
-+		);
-+	}
-+	}
-+	if (use_pci_vector())
-+		printk(KERN_INFO "Using vector-based indexing\n");
-+	printk(KERN_DEBUG "IRQ to pin mappings:\n");
-+	for (i = 0; i < NR_IRQS; i++) {
-+		struct irq_pin_list *entry = irq_2_pin + i;
-+		if (entry->pin < 0)
-+			continue;
-+ 		if (use_pci_vector() && !platform_legacy_irq(i))
-+			printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
-+		else
-+			printk(KERN_DEBUG "IRQ%d ", i);
-+		for (;;) {
-+			printk("-> %d:%d", entry->apic, entry->pin);
-+			if (!entry->next)
-+				break;
-+			entry = irq_2_pin + entry->next;
-+		}
-+		printk("\n");
-+	}
-+
-+	printk(KERN_INFO ".................................... done.\n");
-+
-+	return;
-+}
-+
-+#if 0
-+
-+static void print_APIC_bitfield (int base)
-+{
-+	unsigned int v;
-+	int i, j;
-+
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
-+
-+	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
-+	for (i = 0; i < 8; i++) {
-+		v = apic_read(base + i*0x10);
-+		for (j = 0; j < 32; j++) {
-+			if (v & (1<<j))
-+				printk("1");
-+			else
-+				printk("0");
-+		}
-+		printk("\n");
-+	}
-+}
-+
-+void /*__init*/ print_local_APIC(void * dummy)
-+{
-+	unsigned int v, ver, maxlvt;
-+
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
-+
-+	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-+		smp_processor_id(), hard_smp_processor_id());
-+	v = apic_read(APIC_ID);
-+	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
-+	v = apic_read(APIC_LVR);
-+	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-+	ver = GET_APIC_VERSION(v);
-+	maxlvt = get_maxlvt();
-+
-+	v = apic_read(APIC_TASKPRI);
-+	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-+
-+	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
-+		v = apic_read(APIC_ARBPRI);
-+		printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-+			v & APIC_ARBPRI_MASK);
-+		v = apic_read(APIC_PROCPRI);
-+		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-+	}
-+
-+	v = apic_read(APIC_EOI);
-+	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
-+	v = apic_read(APIC_RRR);
-+	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-+	v = apic_read(APIC_LDR);
-+	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-+	v = apic_read(APIC_DFR);
-+	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-+	v = apic_read(APIC_SPIV);
-+	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-+
-+	printk(KERN_DEBUG "... APIC ISR field:\n");
-+	print_APIC_bitfield(APIC_ISR);
-+	printk(KERN_DEBUG "... APIC TMR field:\n");
-+	print_APIC_bitfield(APIC_TMR);
-+	printk(KERN_DEBUG "... APIC IRR field:\n");
-+	print_APIC_bitfield(APIC_IRR);
-+
-+	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
-+		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
-+			apic_write(APIC_ESR, 0);
-+		v = apic_read(APIC_ESR);
-+		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-+	}
-+
-+	v = apic_read(APIC_ICR);
-+	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
-+	v = apic_read(APIC_ICR2);
-+	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
-+
-+	v = apic_read(APIC_LVTT);
-+	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-+
-+	if (maxlvt > 3) {                       /* PC is LVT#4. */
-+		v = apic_read(APIC_LVTPC);
-+		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-+	}
-+	v = apic_read(APIC_LVT0);
-+	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-+	v = apic_read(APIC_LVT1);
-+	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-+
-+	if (maxlvt > 2) {			/* ERR is LVT#3. */
-+		v = apic_read(APIC_LVTERR);
-+		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-+	}
-+
-+	v = apic_read(APIC_TMICT);
-+	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-+	v = apic_read(APIC_TMCCT);
-+	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-+	v = apic_read(APIC_TDCR);
-+	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-+	printk("\n");
-+}
-+
-+void print_all_local_APICs (void)
-+{
-+	on_each_cpu(print_local_APIC, NULL, 1, 1);
-+}
-+
-+void /*__init*/ print_PIC(void)
-+{
-+	unsigned int v;
-+	unsigned long flags;
-+
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
-+
-+	printk(KERN_DEBUG "\nprinting PIC contents\n");
-+
-+	spin_lock_irqsave(&i8259A_lock, flags);
-+
-+	v = inb(0xa1) << 8 | inb(0x21);
-+	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
-+
-+	v = inb(0xa0) << 8 | inb(0x20);
-+	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
-+
-+	outb(0x0b,0xa0);
-+	outb(0x0b,0x20);
-+	v = inb(0xa0) << 8 | inb(0x20);
-+	outb(0x0a,0xa0);
-+	outb(0x0a,0x20);
-+
-+	spin_unlock_irqrestore(&i8259A_lock, flags);
-+
-+	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
-+
-+	v = inb(0x4d1) << 8 | inb(0x4d0);
-+	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-+}
-+
-+#endif  /*  0  */
-+
-+#else
-+void __init print_IO_APIC(void) { }
-+#endif /* !CONFIG_XEN */
-+
-+static void __init enable_IO_APIC(void)
-+{
-+	union IO_APIC_reg_01 reg_01;
-+	int i8259_apic, i8259_pin;
-+	int i, apic;
-+	unsigned long flags;
-+
-+	for (i = 0; i < PIN_MAP_SIZE; i++) {
-+		irq_2_pin[i].pin = -1;
-+		irq_2_pin[i].next = 0;
-+	}
-+	if (!pirqs_enabled)
-+		for (i = 0; i < MAX_PIRQS; i++)
-+			pirq_entries[i] = -1;
-+
-+	/*
-+	 * The number of IO-APIC IRQ registers (== #pins):
-+	 */
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_01.raw = io_apic_read(apic, 1);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
-+	}
-+	for(apic = 0; apic < nr_ioapics; apic++) {
-+		int pin;
-+		/* See if any of the pins is in ExtINT mode */
-+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+			struct IO_APIC_route_entry entry;
-+			spin_lock_irqsave(&ioapic_lock, flags);
-+			*(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+			*(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+			spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+
-+			/* If the interrupt line is enabled and in ExtInt mode
-+			 * I have found the pin where the i8259 is connected.
-+			 */
-+			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
-+				ioapic_i8259.apic = apic;
-+				ioapic_i8259.pin  = pin;
-+				goto found_i8259;
-+			}
-+		}
-+	}
-+ found_i8259:
-+	/* Look to see if the MP table has reported the ExtINT */
-+	/* If we could not find the appropriate pin by looking at the ioapic,
-+	 * the i8259 probably is not connected to the ioapic, but give the
-+	 * mptable a chance anyway.
-+	 */
-+	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
-+	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
-+	/* Trust the MP table if nothing is set up in the hardware */
-+	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
-+		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
-+		ioapic_i8259.pin  = i8259_pin;
-+		ioapic_i8259.apic = i8259_apic;
-+	}
-+	/* Complain if the MP table and the hardware disagree */
-+	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
-+		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
-+	{
-+		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
-+	}
-+
-+	/*
-+	 * Do not trust the IO-APIC being empty at bootup
-+	 */
-+	clear_IO_APIC();
-+}
-+
-+/*
-+ * Not an __init, needed by the reboot code
-+ */
-+void disable_IO_APIC(void)
-+{
-+	/*
-+	 * Clear the IO-APIC before rebooting:
-+	 */
-+	clear_IO_APIC();
-+
-+#ifndef CONFIG_XEN
-+	/*
-+	 * If the i8259 is routed through an IOAPIC,
-+	 * put that IOAPIC in virtual wire mode
-+	 * so legacy interrupts can be delivered.
-+	 */
-+	if (ioapic_i8259.pin != -1) {
-+		struct IO_APIC_route_entry entry;
-+		unsigned long flags;
-+
-+		memset(&entry, 0, sizeof(entry));
-+		entry.mask            = 0; /* Enabled */
-+		entry.trigger         = 0; /* Edge */
-+		entry.irr             = 0;
-+		entry.polarity        = 0; /* High */
-+		entry.delivery_status = 0;
-+		entry.dest_mode       = 0; /* Physical */
-+		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
-+		entry.vector          = 0;
-+		entry.dest.physical.physical_dest =
-+					GET_APIC_ID(apic_read(APIC_ID));
-+
-+		/*
-+		 * Add it to the IO-APIC irq-routing table:
-+		 */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
-+			*(((int *)&entry)+1));
-+		io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
-+			*(((int *)&entry)+0));
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+	}
-+	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
-+#endif
-+}
-+
-+/*
-+ * function to set the IO-APIC physical IDs based on the
-+ * values stored in the MPC table.
-+ *
-+ * by Matt Domsch <Matt_Domsch at dell.com>  Tue Dec 21 12:25:05 CST 1999
-+ */
-+
-+#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
-+static void __init setup_ioapic_ids_from_mpc(void)
-+{
-+	union IO_APIC_reg_00 reg_00;
-+	physid_mask_t phys_id_present_map;
-+	int apic;
-+	int i;
-+	unsigned char old_id;
-+	unsigned long flags;
-+
-+	/*
-+	 * Don't check I/O APIC IDs for xAPIC systems.  They have
-+	 * no meaning without the serial APIC bus.
-+	 */
-+	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-+		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
-+		return;
-+	/*
-+	 * This is broken; anything with a real cpu count has to
-+	 * circumvent this idiocy regardless.
-+	 */
-+	phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
-+
-+	/*
-+	 * Set the IOAPIC ID to the value stored in the MPC table.
-+	 */
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+		/* Read the register 0 value */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_00.raw = io_apic_read(apic, 0);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		
-+		old_id = mp_ioapics[apic].mpc_apicid;
-+
-+		if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
-+			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
-+				apic, mp_ioapics[apic].mpc_apicid);
-+			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
-+				reg_00.bits.ID);
-+			mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
-+		}
-+
-+		/*
-+		 * Sanity check, is the ID really free? Every APIC in a
-+		 * system must have a unique ID or we get lots of nice
-+		 * 'stuck on smp_invalidate_needed IPI wait' messages.
-+		 */
-+		if (check_apicid_used(phys_id_present_map,
-+					mp_ioapics[apic].mpc_apicid)) {
-+			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
-+				apic, mp_ioapics[apic].mpc_apicid);
-+			for (i = 0; i < get_physical_broadcast(); i++)
-+				if (!physid_isset(i, phys_id_present_map))
-+					break;
-+			if (i >= get_physical_broadcast())
-+				panic("Max APIC ID exceeded!\n");
-+			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
-+				i);
-+			physid_set(i, phys_id_present_map);
-+			mp_ioapics[apic].mpc_apicid = i;
-+		} else {
-+			physid_mask_t tmp;
-+			tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
-+			apic_printk(APIC_VERBOSE, "Setting %d in the "
-+					"phys_id_present_map\n",
-+					mp_ioapics[apic].mpc_apicid);
-+			physids_or(phys_id_present_map, phys_id_present_map, tmp);
-+		}
-+
-+
-+		/*
-+		 * We need to adjust the IRQ routing table
-+		 * if the ID changed.
-+		 */
-+		if (old_id != mp_ioapics[apic].mpc_apicid)
-+			for (i = 0; i < mp_irq_entries; i++)
-+				if (mp_irqs[i].mpc_dstapic == old_id)
-+					mp_irqs[i].mpc_dstapic
-+						= mp_ioapics[apic].mpc_apicid;
-+
-+		/*
-+		 * Read the right value from the MPC table and
-+		 * write it into the ID register.
-+	 	 */
-+		apic_printk(APIC_VERBOSE, KERN_INFO
-+			"...changing IO-APIC physical APIC ID to %d ...",
-+			mp_ioapics[apic].mpc_apicid);
-+
-+		reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(apic, 0, reg_00.raw);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+		/*
-+		 * Sanity check
-+		 */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_00.raw = io_apic_read(apic, 0);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
-+			printk("could not set ID!\n");
-+		else
-+			apic_printk(APIC_VERBOSE, " ok.\n");
-+	}
-+}
-+#else
-+static void __init setup_ioapic_ids_from_mpc(void) { }
-+#endif
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * There is a nasty bug in some older SMP boards; their mptable lies
-+ * about the timer IRQ. We do the following to work around the situation:
-+ *
-+ *	- timer IRQ defaults to IO-APIC IRQ
-+ *	- if this function detects that timer IRQs are defunct, then we fall
-+ *	  back to ISA timer IRQs
-+ */
-+static int __init timer_irq_works(void)
-+{
-+	unsigned long t1 = jiffies;
-+
-+	local_irq_enable();
-+	/* Let ten ticks pass... */
-+	mdelay((10 * 1000) / HZ);
-+
-+	/*
-+	 * Expect a few ticks at least, to be sure some possible
-+	 * glue logic does not lock up after one or two first
-+	 * ticks in a non-ExtINT mode.  Also the local APIC
-+	 * might have cached one ExtINT interrupt.  Finally, at
-+	 * least one tick may be lost due to delays.
-+	 */
-+	if (jiffies - t1 > 4)
-+		return 1;
-+
-+	return 0;
-+}
-+
-+/*
-+ * In the SMP+IOAPIC case it might happen that there are an unspecified
-+ * number of pending IRQ events unhandled. These cases are very rare,
-+ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
-+ * better to do it this way as we then do not have to be aware of
-+ * 'pending' interrupts in the IRQ path, except at this point.
-+ */
-+/*
-+ * Edge triggered needs to resend any interrupt
-+ * that was delayed but this is now handled in the device
-+ * independent code.
-+ */
-+
-+/*
-+ * Starting up an edge-triggered IO-APIC interrupt is
-+ * nasty - we need to make sure that we get the edge.
-+ * If it is already asserted for some reason, we need to
-+ * return 1 to indicate that it was pending.
-+ *
-+ * This is not complete - we should be able to fake
-+ * an edge even if it isn't on the 8259A...
-+ */
-+static unsigned int startup_edge_ioapic_irq(unsigned int irq)
-+{
-+	int was_pending = 0;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	if (irq < 16) {
-+		disable_8259A_irq(irq);
-+		if (i8259A_irq_pending(irq))
-+			was_pending = 1;
-+	}
-+	__unmask_IO_APIC_irq(irq);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return was_pending;
-+}
-+
-+/*
-+ * Once we have recorded IRQ_PENDING already, we can mask the
-+ * interrupt for real. This prevents IRQ storms from unhandled
-+ * devices.
-+ */
-+static void ack_edge_ioapic_irq(unsigned int irq)
-+{
-+	move_irq(irq);
-+	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
-+					== (IRQ_PENDING | IRQ_DISABLED))
-+		mask_IO_APIC_irq(irq);
-+	ack_APIC_irq();
-+}
-+
-+/*
-+ * Level triggered interrupts can just be masked,
-+ * and shutting down and starting up the interrupt
-+ * is the same as enabling and disabling them -- except
-+ * that a startup needs to return a "was pending" value.
-+ *
-+ * Level triggered interrupts are special because we
-+ * do not touch any IO-APIC register while handling
-+ * them. We ack the APIC in the end-IRQ handler, not
-+ * in the start-IRQ-handler. Protection against reentrance
-+ * from the same interrupt is still provided, both by the
-+ * generic IRQ layer and by the fact that an unacked local
-+ * APIC does not accept IRQs.
-+ */
-+static unsigned int startup_level_ioapic_irq (unsigned int irq)
-+{
-+	unmask_IO_APIC_irq(irq);
-+
-+	return 0; /* don't check for pending */
-+}
-+
-+static void end_level_ioapic_irq (unsigned int irq)
-+{
-+	unsigned long v;
-+	int i;
-+
-+	move_irq(irq);
-+/*
-+ * It appears there is an erratum which affects at least version 0x11
-+ * of I/O APIC (that's the 82093AA and cores integrated into various
-+ * chipsets).  Under certain conditions a level-triggered interrupt is
-+ * erroneously delivered as an edge-triggered one, but the respective IRR
-+ * bit gets set nevertheless.  As a result the I/O unit expects an EOI
-+ * message but it will never arrive and further interrupts are blocked
-+ * from the source.  The exact reason is so far unknown, but the
-+ * phenomenon was observed when two consecutive interrupt requests
-+ * from a given source get delivered to the same CPU and the source is
-+ * temporarily disabled in between.
-+ *
-+ * A workaround is to simulate an EOI message manually.  We achieve it
-+ * by setting the trigger mode to edge and then to level when the edge
-+ * trigger mode gets detected in the TMR of a local APIC for a
-+ * level-triggered interrupt.  We mask the source for the time of the
-+ * operation to prevent an edge-triggered interrupt escaping meanwhile.
-+ * The idea is from Manfred Spraul.  --macro
-+ */
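-+	/*
-+	 * The TMR is an array of eight 32-bit registers spaced 0x10 apart,
-+	 * so vector i's trigger-mode bit is bit (i % 32) of register
-+	 * (i / 32); ((i & ~0x1f) >> 1) below is that register's byte offset.
-+	 */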
-+	i = IO_APIC_VECTOR(irq);
-+
-+	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
-+
-+	ack_APIC_irq();
-+
-+	if (!(v & (1 << (i & 0x1f)))) {
-+		atomic_inc(&irq_mis_count);
-+		spin_lock(&ioapic_lock);
-+		__mask_and_edge_IO_APIC_irq(irq);
-+		__unmask_and_level_IO_APIC_irq(irq);
-+		spin_unlock(&ioapic_lock);
-+	}
-+}
-+
-+#ifdef CONFIG_PCI_MSI
-+static unsigned int startup_edge_ioapic_vector(unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	return startup_edge_ioapic_irq(irq);
-+}
-+
-+static void ack_edge_ioapic_vector(unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	move_native_irq(vector);
-+	ack_edge_ioapic_irq(irq);
-+}
-+
-+static unsigned int startup_level_ioapic_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	return startup_level_ioapic_irq (irq);
-+}
-+
-+static void end_level_ioapic_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	move_native_irq(vector);
-+	end_level_ioapic_irq(irq);
-+}
-+
-+static void mask_IO_APIC_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	mask_IO_APIC_irq(irq);
-+}
-+
-+static void unmask_IO_APIC_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	unmask_IO_APIC_irq(irq);
-+}
-+
-+#ifdef CONFIG_SMP
-+static void set_ioapic_affinity_vector (unsigned int vector,
-+					cpumask_t cpu_mask)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	set_native_irq_info(vector, cpu_mask);
-+	set_ioapic_affinity_irq(irq, cpu_mask);
-+}
-+#endif
-+#endif
-+
-+static int ioapic_retrigger(unsigned int irq)
-+{
-+	send_IPI_self(IO_APIC_VECTOR(irq));
-+
-+	return 1;
-+}
-+
-+/*
-+ * Level and edge triggered IO-APIC interrupts need different handling,
-+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
-+ * handled with the level-triggered descriptor, but that one has slightly
-+ * more overhead. Level-triggered interrupts cannot be handled with the
-+ * edge-triggered handler, without risking IRQ storms and other ugly
-+ * races.
-+ */
-+static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
-+	.typename 	= "IO-APIC-edge",
-+	.startup 	= startup_edge_ioapic,
-+	.shutdown 	= shutdown_edge_ioapic,
-+	.enable 	= enable_edge_ioapic,
-+	.disable 	= disable_edge_ioapic,
-+	.ack 		= ack_edge_ioapic,
-+	.end 		= end_edge_ioapic,
-+#ifdef CONFIG_SMP
-+	.set_affinity 	= set_ioapic_affinity,
-+#endif
-+	.retrigger	= ioapic_retrigger,
-+};
-+
-+static struct hw_interrupt_type ioapic_level_type __read_mostly = {
-+	.typename 	= "IO-APIC-level",
-+	.startup 	= startup_level_ioapic,
-+	.shutdown 	= shutdown_level_ioapic,
-+	.enable 	= enable_level_ioapic,
-+	.disable 	= disable_level_ioapic,
-+	.ack 		= mask_and_ack_level_ioapic,
-+	.end 		= end_level_ioapic,
-+#ifdef CONFIG_SMP
-+	.set_affinity 	= set_ioapic_affinity,
-+#endif
-+	.retrigger	= ioapic_retrigger,
-+};
-+#endif /* !CONFIG_XEN */
-+
-+static inline void init_IO_APIC_traps(void)
-+{
-+	int irq;
-+
-+	/*
-+	 * NOTE! The local APIC isn't very good at handling
-+	 * multiple interrupts at the same interrupt level.
-+	 * As the interrupt level is determined by taking the
-+	 * vector number and shifting that right by 4, we
-+	 * want to spread these out a bit so that they don't
-+	 * all fall in the same interrupt level.
-+	 *
-+	 * Also, we've got to be careful not to trash gate
-+	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
-+	 */
-+	for (irq = 0; irq < NR_IRQS ; irq++) {
-+		int tmp = irq;
-+		if (use_pci_vector()) {
-+			if (!platform_legacy_irq(tmp))
-+				if ((tmp = vector_to_irq(tmp)) == -1)
-+					continue;
-+		}
-+		if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
-+			/*
-+			 * Hmm.. We don't have an entry for this,
-+			 * so default to an old-fashioned 8259
-+			 * interrupt if we can..
-+			 */
-+			if (irq < 16)
-+				make_8259A_irq(irq);
-+#ifndef CONFIG_XEN
-+			else
-+				/* Strange. Oh, well.. */
-+				irq_desc[irq].chip = &no_irq_type;
-+#endif
-+		}
-+	}
-+}
-+
-+int timer_uses_ioapic_pin_0;
-+
-+#ifndef CONFIG_XEN
-+static void enable_lapic_irq (unsigned int irq)
-+{
-+	unsigned long v;
-+
-+	v = apic_read(APIC_LVT0);
-+	apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
-+}
-+
-+static void disable_lapic_irq (unsigned int irq)
-+{
-+	unsigned long v;
-+
-+	v = apic_read(APIC_LVT0);
-+	apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
-+}
-+
-+static void ack_lapic_irq (unsigned int irq)
-+{
-+	ack_APIC_irq();
-+}
-+
-+static void end_lapic_irq (unsigned int i) { /* nothing */ }
-+
-+static struct hw_interrupt_type lapic_irq_type __read_mostly = {
-+	.typename 	= "local-APIC-edge",
-+	.startup 	= NULL, /* startup_irq() not used for IRQ0 */
-+	.shutdown 	= NULL, /* shutdown_irq() not used for IRQ0 */
-+	.enable 	= enable_lapic_irq,
-+	.disable 	= disable_lapic_irq,
-+	.ack 		= ack_lapic_irq,
-+	.end 		= end_lapic_irq
-+};
-+
-+static void setup_nmi (void)
-+{
-+	/*
-+ 	 * Dirty trick to enable the NMI watchdog ...
-+	 * We put the 8259A master into AEOI mode and
-+	 * unmask LVT0 on all local APICs as NMI.
-+	 *
-+	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
-+	 * is from Maciej W. Rozycki - so we do not have to EOI from
-+	 * the NMI handler or the timer interrupt.
-+	 */ 
-+	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
-+
-+	on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
-+
-+	apic_printk(APIC_VERBOSE, " done.\n");
-+}
-+
-+/*
-+ * This looks a bit hackish but it's about the only way of sending
-+ * a few INTA cycles to 8259As and any associated glue logic.  ICR does
-+ * not support the ExtINT mode, unfortunately.  We need to send these
-+ * cycles as some i82489DX-based boards have glue logic that keeps the
-+ * 8259A interrupt line asserted until INTA.  --macro
-+ */
-+static inline void unlock_ExtINT_logic(void)
-+{
-+	int apic, pin, i;
-+	struct IO_APIC_route_entry entry0, entry1;
-+	unsigned char save_control, save_freq_select;
-+	unsigned long flags;
-+
-+	pin  = find_isa_irq_pin(8, mp_INT);
-+	apic = find_isa_irq_apic(8, mp_INT);
-+	if (pin == -1)
-+		return;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	*(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+	*(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+	clear_IO_APIC_pin(apic, pin);
-+
-+	memset(&entry1, 0, sizeof(entry1));
-+
-+	entry1.dest_mode = 0;			/* physical delivery */
-+	entry1.mask = 0;			/* unmask IRQ now */
-+	entry1.dest.physical.physical_dest = hard_smp_processor_id();
-+	entry1.delivery_mode = dest_ExtINT;
-+	entry1.polarity = entry0.polarity;
-+	entry1.trigger = 0;
-+	entry1.vector = 0;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
-+	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	save_control = CMOS_READ(RTC_CONTROL);
-+	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-+	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
-+		   RTC_FREQ_SELECT);
-+	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
-+
-+	i = 100;
-+	while (i-- > 0) {
-+		mdelay(10);
-+		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
-+			i -= 10;
-+	}
-+
-+	CMOS_WRITE(save_control, RTC_CONTROL);
-+	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-+	clear_IO_APIC_pin(apic, pin);
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
-+	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+/*
-+ * This code may look a bit paranoid, but it's supposed to cooperate with
-+ * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
-+ * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
-+ * fanatically on his truly buggy board.
-+ */
-+static inline void check_timer(void)
-+{
-+	int apic1, pin1, apic2, pin2;
-+	int vector;
-+
-+	/*
-+	 * get/set the timer IRQ vector:
-+	 */
-+	disable_8259A_irq(0);
-+	vector = assign_irq_vector(0);
-+	set_intr_gate(vector, interrupt[0]);
-+
-+	/*
-+	 * Subtle: code in do_timer_interrupt() expects an AEOI
-+	 * mode for the 8259A whenever interrupts are routed
-+	 * through I/O APICs.  Also IRQ0 has to be enabled in
-+	 * the 8259A which implies the virtual wire has to be
-+	 * disabled in the local APIC.
-+	 */
-+	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+	init_8259A(1);
-+	timer_ack = 1;
-+	if (timer_over_8254 > 0)
-+		enable_8259A_irq(0);
-+
-+	pin1  = find_isa_irq_pin(0, mp_INT);
-+	apic1 = find_isa_irq_apic(0, mp_INT);
-+	pin2  = ioapic_i8259.pin;
-+	apic2 = ioapic_i8259.apic;
-+
-+	if (pin1 == 0)
-+		timer_uses_ioapic_pin_0 = 1;
-+
-+	printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
-+		vector, apic1, pin1, apic2, pin2);
-+
-+	if (pin1 != -1) {
-+		/*
-+		 * Ok, does IRQ0 through the IOAPIC work?
-+		 */
-+		unmask_IO_APIC_irq(0);
-+		if (timer_irq_works()) {
-+			if (nmi_watchdog == NMI_IO_APIC) {
-+				disable_8259A_irq(0);
-+				setup_nmi();
-+				enable_8259A_irq(0);
-+			}
-+			if (disable_timer_pin_1 > 0)
-+				clear_IO_APIC_pin(0, pin1);
-+			return;
-+		}
-+		clear_IO_APIC_pin(apic1, pin1);
-+		printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
-+				"IO-APIC\n");
-+	}
-+
-+	printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
-+	if (pin2 != -1) {
-+		printk("\n..... (found pin %d) ...", pin2);
-+		/*
-+		 * legacy devices should be connected to IO APIC #0
-+		 */
-+		setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
-+		if (timer_irq_works()) {
-+			printk("works.\n");
-+			if (pin1 != -1)
-+				replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
-+			else
-+				add_pin_to_irq(0, apic2, pin2);
-+			if (nmi_watchdog == NMI_IO_APIC) {
-+				setup_nmi();
-+			}
-+			return;
-+		}
-+		/*
-+		 * Cleanup, just in case ...
-+		 */
-+		clear_IO_APIC_pin(apic2, pin2);
-+	}
-+	printk(" failed.\n");
-+
-+	if (nmi_watchdog == NMI_IO_APIC) {
-+		printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
-+		nmi_watchdog = 0;
-+	}
-+
-+	printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++	# interrupted in the sysexit critical region
++	addl $0x38,%esp			# Remove cs...ebx from stack frame.
++	# this popped off the new frame to reuse the old one, therefore no
++	# CFI_ADJUST_CFA_OFFSET here
++11:	push %esp
++	CFI_ADJUST_CFA_OFFSET 4
++	call evtchn_do_upcall
++	add  $4,%esp
++	CFI_ADJUST_CFA_OFFSET -4
++	jmp  ret_from_intr
 +
-+	disable_8259A_irq(0);
-+	irq_desc[0].chip = &lapic_irq_type;
-+	apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
-+	enable_8259A_irq(0);
++        ALIGN
++restore_all_enable_events:
++	__ENABLE_INTERRUPTS
++scrit:	/**** START OF CRITICAL REGION ****/
++	__TEST_PENDING
++	jnz  14f			# process more events if necessary...
++	RESTORE_REGS
++	addl $4, %esp
++	CFI_ADJUST_CFA_OFFSET -4
++1:	iret
++.section __ex_table,"a"
++	.align 4
++	.long 1b,iret_exc
++.previous
++14:	__DISABLE_INTERRUPTS
++	jmp  11b
++ecrit:  /**** END OF CRITICAL REGION ****/
++# [How we do the fixup]. We want to merge the current stack frame with the
++# just-interrupted frame. How we do this depends on where in the critical
++# region the interrupted handler was executing, and so how many saved
++# registers are in each frame. We do this quickly using the lookup table
++# 'critical_fixup_table'. For each byte offset in the critical region, it
++# provides the number of bytes which have already been popped from the
++# interrupted stack frame.
++critical_region_fixup:
++	addl $critical_fixup_table-scrit,%eax
++	movzbl (%eax),%eax		# %eax contains num bytes popped
++	cmpb $0xff,%al                  # 0xff => vcpu_info critical region
++	jne  15f
++	GET_THREAD_INFO(%ebp)
++        xorl %eax,%eax
++15:	mov  %esp,%esi
++	add  %eax,%esi			# %esi points at end of src region
++	mov  %esp,%edi
++	add  $0x38,%edi			# %edi points at end of dst region
++	mov  %eax,%ecx
++	shr  $2,%ecx			# convert byte count to dwords
++	je   17f			# skip loop if nothing to copy
++16:	subl $4,%esi			# pre-decrementing copy loop
++	subl $4,%edi
++	movl (%esi),%eax
++	movl %eax,(%edi)
++	loop 16b
++17:	movl %edi,%esp			# final %edi is top of merged stack
++	# this popped off the new frame to reuse the old one, therefore no
++	# CFI_DEF_CFA_OFFSET here
++	jmp  11b
++	CFI_ENDPROC
 +
-+	if (timer_irq_works()) {
-+		printk(" works.\n");
-+		return;
-+	}
-+	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
-+	printk(" failed.\n");
++critical_fixup_table:
++	.byte 0xff,0xff,0xff		# testb $0xff,(%esi) = __TEST_PENDING
++	.byte 0xff,0xff			# jnz  14f
++	.byte 0x00			# pop  %ebx
++	.byte 0x04			# pop  %ecx
++	.byte 0x08			# pop  %edx
++	.byte 0x0c			# pop  %esi
++	.byte 0x10			# pop  %edi
++	.byte 0x14			# pop  %ebp
++	.byte 0x18			# pop  %eax
++	.byte 0x1c			# pop  %ds
++	.byte 0x20			# pop  %es
++	.byte 0x24			# pop  %gs
++	.byte 0x28,0x28,0x28		# add  $4,%esp
++	.byte 0x2c			# iret
++	.byte 0xff,0xff,0xff,0xff	# movb $1,1(%esi)
++	.byte 0x00,0x00			# jmp  11b
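++# Example: an upcall landing on the 'pop %esi' step of RESTORE_REGS finds
++# %ebx, %ecx and %edx already popped, so its table entry is 0x0c; the loop
++# above copies those 12 freshly re-saved bytes up the stack, %esp advances
++# by 0x38-0x0c, and the merged frame is handled from label 11 as usual.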
 +
-+	printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
++# Hypervisor uses this for application faults while it executes.
++# We get here for two reasons:
++#  1. Fault while reloading DS, ES, FS or GS
++#  2. Fault while executing IRET
++# Category 1 we fix up by reattempting the load, and zeroing the segment
++# register if the load fails.
++# Category 2 we fix up by jumping to do_iret_error. We cannot use the
++# normal Linux return path in this case because if we use the IRET hypercall
++# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
++# We distinguish between categories by maintaining a status value in EAX.
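++# For example, if the '1:' reload of %ds below faults, the fixup branch in
++# the exception table zeroes the saved %ds slot and retries the load, so
++# the task continues with a null selector (a category 1 fixup).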
++ENTRY(failsafe_callback)
++	RING0_INT_FRAME
++	pushl %eax
++	CFI_ADJUST_CFA_OFFSET 4
++	movl $1,%eax
++1:	mov 4(%esp),%ds
++2:	mov 8(%esp),%es
++3:	mov 12(%esp),%fs
++4:	mov 16(%esp),%gs
++	testl %eax,%eax
++	popl %eax
++	CFI_ADJUST_CFA_OFFSET -4
++	jz 5f
++	addl $16,%esp		# EAX != 0 => Category 2 (Bad IRET)
++	CFI_ADJUST_CFA_OFFSET -16
++	jmp iret_exc
++	CFI_ADJUST_CFA_OFFSET 16
++5:	addl $16,%esp		# EAX == 0 => Category 1 (Bad segment)
++	CFI_ADJUST_CFA_OFFSET -16
++	pushl $0
++	CFI_ADJUST_CFA_OFFSET 4
++	SAVE_ALL
++	jmp ret_from_exception
++.section .fixup,"ax";		\
++6:	xorl %eax,%eax;		\
++	movl %eax,4(%esp);	\
++	jmp 1b;			\
++7:	xorl %eax,%eax;		\
++	movl %eax,8(%esp);	\
++	jmp 2b;			\
++8:	xorl %eax,%eax;		\
++	movl %eax,12(%esp);	\
++	jmp 3b;			\
++9:	xorl %eax,%eax;		\
++	movl %eax,16(%esp);	\
++	jmp 4b;			\
++.previous;			\
++.section __ex_table,"a";	\
++	.align 4;		\
++	.long 1b,6b;		\
++	.long 2b,7b;		\
++	.long 3b,8b;		\
++	.long 4b,9b;		\
++.previous
++	CFI_ENDPROC
++#endif
 +
-+	timer_ack = 0;
-+	init_8259A(0);
-+	make_8259A_irq(0);
-+	apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
++ENTRY(coprocessor_error)
++	RING0_INT_FRAME
++	pushl $0
++	CFI_ADJUST_CFA_OFFSET 4
++	pushl $do_coprocessor_error
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
 +
-+	unlock_ExtINT_logic();
++ENTRY(simd_coprocessor_error)
++	RING0_INT_FRAME
++	pushl $0
++	CFI_ADJUST_CFA_OFFSET 4
++	pushl $do_simd_coprocessor_error
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
 +
-+	if (timer_irq_works()) {
-+		printk(" works.\n");
-+		return;
-+	}
-+	printk(" failed :(.\n");
-+	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
-+		"report.  Then try booting with the 'noapic' option");
-+}
-+#else
-+#define check_timer() ((void)0)
++ENTRY(device_not_available)
++	RING0_INT_FRAME
++	pushl $-1			# mark this as an int
++	CFI_ADJUST_CFA_OFFSET 4
++	SAVE_ALL
++#ifndef CONFIG_XEN
++	GET_CR0_INTO_EAX
++	testl $0x4, %eax		# EM (math emulation bit)
++	je device_available_emulate
++	pushl $0			# temporary storage for ORIG_EIP
++	CFI_ADJUST_CFA_OFFSET 4
++	call math_emulate
++	addl $4, %esp
++	CFI_ADJUST_CFA_OFFSET -4
++	jmp ret_from_exception
++device_available_emulate:
 +#endif
++	preempt_stop(CLBR_ANY)
++	call math_state_restore
++	jmp ret_from_exception
++	CFI_ENDPROC
 +
++#ifndef CONFIG_XEN
 +/*
++ * Debug traps and NMI can happen at the one SYSENTER instruction
++ * that sets up the real kernel stack. Check here, since we can't
++ * allow the wrong stack to be used.
 + *
-+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
-+ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
-+ *   Linux doesn't really care, as it's not actually used
-+ *   for any interrupt handling anyway.
++ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
++ * already pushed 3 words if it hits on the sysenter instruction:
++ * eflags, cs and eip.
++ *
++ * We just load the right stack, and push the three (known) values
++ * by hand onto the new stack - while updating the return eip past
++ * the instruction that would have done it for sysenter.
 + */
-+#define PIC_IRQS	(1 << PIC_CASCADE_IR)
-+
-+void __init setup_IO_APIC(void)
-+{
-+	enable_IO_APIC();
-+
-+	if (acpi_ioapic)
-+		io_apic_irqs = ~0;	/* all IRQs go through IOAPIC */
-+	else
-+		io_apic_irqs = ~PIC_IRQS;
-+
-+	printk("ENABLING IO-APIC IRQs\n");
++#define FIX_STACK(offset, ok, label)		\
++	cmpw $__KERNEL_CS,4(%esp);		\
++	jne ok;					\
++label:						\
++	movl SYSENTER_stack_esp0+offset(%esp),%esp;	\
++	CFI_DEF_CFA esp, 0;			\
++	CFI_UNDEFINED eip;			\
++	pushfl;					\
++	CFI_ADJUST_CFA_OFFSET 4;		\
++	pushl $__KERNEL_CS;			\
++	CFI_ADJUST_CFA_OFFSET 4;		\
++	pushl $sysenter_past_esp;		\
++	CFI_ADJUST_CFA_OFFSET 4;		\
++	CFI_REL_OFFSET eip, 0
++#endif /* CONFIG_XEN */
 +
-+	/*
-+	 * Set up IO-APIC IRQ routing.
-+	 */
-+	if (!acpi_ioapic)
-+		setup_ioapic_ids_from_mpc();
++KPROBE_ENTRY(debug)
++	RING0_INT_FRAME
 +#ifndef CONFIG_XEN
-+	sync_Arb_IDs();
-+#endif
-+	setup_IO_APIC_irqs();
-+	init_IO_APIC_traps();
-+	check_timer();
-+	if (!acpi_ioapic)
-+		print_IO_APIC();
-+}
-+
-+static int __init setup_disable_8254_timer(char *s)
-+{
-+	timer_over_8254 = -1;
-+	return 1;
-+}
-+static int __init setup_enable_8254_timer(char *s)
-+{
-+	timer_over_8254 = 2;
-+	return 1;
-+}
-+
-+__setup("disable_8254_timer", setup_disable_8254_timer);
-+__setup("enable_8254_timer", setup_enable_8254_timer);
++	cmpl $sysenter_entry,(%esp)
++	jne debug_stack_correct
++	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
++debug_stack_correct:
++#endif /* !CONFIG_XEN */
++	pushl $-1			# mark this as an int
++	CFI_ADJUST_CFA_OFFSET 4
++	SAVE_ALL
++	xorl %edx,%edx			# error code 0
++	movl %esp,%eax			# pt_regs pointer
++	call do_debug
++	jmp ret_from_exception
++	CFI_ENDPROC
++KPROBE_END(debug)
 +
++#ifndef CONFIG_XEN
 +/*
-+ *	Called after all the initialization is done. If we didn't find any
-+ *	APIC bugs, then we can allow the modify fast path
++ * NMI is doubly nasty. It can happen _while_ we're handling
++ * a debug fault, and the debug fault hasn't yet been able to
++ * clear up the stack. So we first check whether we got an
++ * NMI on the sysenter entry path, but after that we need to
++ * check whether we got an NMI on the debug path where the debug
++ * fault happened on the sysenter path.
 + */
-+ 
-+static int __init io_apic_bug_finalize(void)
-+{
-+	if(sis_apic_bug == -1)
-+		sis_apic_bug = 0;
-+	if (is_initial_xendomain()) {
-+		dom0_op_t op = { .cmd = DOM0_PLATFORM_QUIRK };
-+		op.u.platform_quirk.quirk_id = sis_apic_bug ?
-+			QUIRK_IOAPIC_BAD_REGSEL : QUIRK_IOAPIC_GOOD_REGSEL;
-+		HYPERVISOR_dom0_op(&op);
-+	}
-+	return 0;
-+}
++KPROBE_ENTRY(nmi)
++	RING0_INT_FRAME
++	pushl %eax
++	CFI_ADJUST_CFA_OFFSET 4
++	movl %ss, %eax
++	cmpw $__ESPFIX_SS, %ax
++	popl %eax
++	CFI_ADJUST_CFA_OFFSET -4
++	je nmi_espfix_stack
++	cmpl $sysenter_entry,(%esp)
++	je nmi_stack_fixup
++	pushl %eax
++	CFI_ADJUST_CFA_OFFSET 4
++	movl %esp,%eax
++	/* Do not access memory above the end of our stack page;
++	 * it might not exist.
++	 */
++	andl $(THREAD_SIZE-1),%eax
++	cmpl $(THREAD_SIZE-20),%eax
++	popl %eax
++	CFI_ADJUST_CFA_OFFSET -4
++	jae nmi_stack_correct
++	cmpl $sysenter_entry,12(%esp)
++	je nmi_debug_stack_check
++nmi_stack_correct:
++	/* We have a RING0_INT_FRAME here */
++	pushl %eax
++	CFI_ADJUST_CFA_OFFSET 4
++	SAVE_ALL
++	xorl %edx,%edx		# zero error code
++	movl %esp,%eax		# pt_regs pointer
++	call do_nmi
++	jmp restore_nocheck_notrace
++	CFI_ENDPROC
 +
-+late_initcall(io_apic_bug_finalize);
++nmi_stack_fixup:
++	RING0_INT_FRAME
++	FIX_STACK(12,nmi_stack_correct, 1)
++	jmp nmi_stack_correct
 +
-+struct sysfs_ioapic_data {
-+	struct sys_device dev;
-+	struct IO_APIC_route_entry entry[0];
-+};
-+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
++nmi_debug_stack_check:
++	/* We have a RING0_INT_FRAME here */
++	cmpw $__KERNEL_CS,16(%esp)
++	jne nmi_stack_correct
++	cmpl $debug,(%esp)
++	jb nmi_stack_correct
++	cmpl $debug_esp_fix_insn,(%esp)
++	ja nmi_stack_correct
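++	# 24 rather than 12: both the debug trap and this NMI have pushed an
++	# eflags/cs/eip frame onto the sysenter stack (six words, not three)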
++	FIX_STACK(24,nmi_stack_correct, 1)
++	jmp nmi_stack_correct
 +
-+static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+	struct IO_APIC_route_entry *entry;
-+	struct sysfs_ioapic_data *data;
-+	unsigned long flags;
-+	int i;
-+	
-+	data = container_of(dev, struct sysfs_ioapic_data, dev);
-+	entry = data->entry;
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+		*(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
-+		*(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
-+	}
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++nmi_espfix_stack:
++	/* We have a RING0_INT_FRAME here.
++	 *
++	 * create the far pointer that the lss below uses to switch back
++	 */
++	pushl %ss
++	CFI_ADJUST_CFA_OFFSET 4
++	pushl %esp
++	CFI_ADJUST_CFA_OFFSET 4
++	addw $4, (%esp)
++	/* copy the iret frame of 12 bytes */
++	.rept 3
++	pushl 16(%esp)
++	CFI_ADJUST_CFA_OFFSET 4
++	.endr
++	pushl %eax
++	CFI_ADJUST_CFA_OFFSET 4
++	SAVE_ALL
++	FIXUP_ESPFIX_STACK		# %eax == %esp
++	xorl %edx,%edx			# zero error code
++	call do_nmi
++	RESTORE_REGS
++	lss 12+4(%esp), %esp		# back to espfix stack
++	CFI_ADJUST_CFA_OFFSET -24
++1:	INTERRUPT_RETURN
++	CFI_ENDPROC
++.section __ex_table,"a"
++	.align 4
++	.long 1b,iret_exc
++.previous
++KPROBE_END(nmi)
++#else
++KPROBE_ENTRY(nmi)
++	RING0_INT_FRAME
++	pushl %eax
++	CFI_ADJUST_CFA_OFFSET 4
++	SAVE_ALL
++	xorl %edx,%edx		# zero error code
++	movl %esp,%eax		# pt_regs pointer
++	call do_nmi
++	orl  $NMI_MASK, PT_EFLAGS(%esp)
++	jmp restore_all
++	CFI_ENDPROC
++KPROBE_END(nmi)
++#endif
 +
-+	return 0;
-+}
++#ifdef CONFIG_PARAVIRT
++ENTRY(native_iret)
++1:	iret
++.section __ex_table,"a"
++	.align 4
++	.long 1b,iret_exc
++.previous
 +
-+static int ioapic_resume(struct sys_device *dev)
-+{
-+	struct IO_APIC_route_entry *entry;
-+	struct sysfs_ioapic_data *data;
-+	unsigned long flags;
-+	union IO_APIC_reg_00 reg_00;
-+	int i;
-+	
-+	data = container_of(dev, struct sysfs_ioapic_data, dev);
-+	entry = data->entry;
++ENTRY(native_irq_enable_sysexit)
++	sti
++	sysexit
++#endif
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_00.raw = io_apic_read(dev->id, 0);
-+	if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
-+		reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
-+		io_apic_write(dev->id, 0, reg_00.raw);
-+	}
-+	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+		io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
-+		io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
-+	}
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++KPROBE_ENTRY(int3)
++	RING0_INT_FRAME
++	pushl $-1			# mark this as an int
++	CFI_ADJUST_CFA_OFFSET 4
++	SAVE_ALL
++	xorl %edx,%edx		# zero error code
++	movl %esp,%eax		# pt_regs pointer
++	call do_int3
++	jmp ret_from_exception
++	CFI_ENDPROC
++KPROBE_END(int3)
 +
-+	return 0;
-+}
++ENTRY(overflow)
++	RING0_INT_FRAME
++	pushl $0
++	CFI_ADJUST_CFA_OFFSET 4
++	pushl $do_overflow
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
 +
-+static struct sysdev_class ioapic_sysdev_class = {
-+	set_kset_name("ioapic"),
-+	.suspend = ioapic_suspend,
-+	.resume = ioapic_resume,
-+};
++ENTRY(bounds)
++	RING0_INT_FRAME
++	pushl $0
++	CFI_ADJUST_CFA_OFFSET 4
++	pushl $do_bounds
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
 +
-+static int __init ioapic_init_sysfs(void)
-+{
-+	struct sys_device * dev;
-+	int i, size, error = 0;
++ENTRY(invalid_op)
++	RING0_INT_FRAME
++	pushl $0
++	CFI_ADJUST_CFA_OFFSET 4
++	pushl $do_invalid_op
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
 +
-+	error = sysdev_class_register(&ioapic_sysdev_class);
-+	if (error)
-+		return error;
++ENTRY(coprocessor_segment_overrun)
++	RING0_INT_FRAME
++	pushl $0
++	CFI_ADJUST_CFA_OFFSET 4
++	pushl $do_coprocessor_segment_overrun
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
 +
-+	for (i = 0; i < nr_ioapics; i++ ) {
-+		size = sizeof(struct sys_device) + nr_ioapic_registers[i] 
-+			* sizeof(struct IO_APIC_route_entry);
-+		mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
-+		if (!mp_ioapic_data[i]) {
-+			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+			continue;
-+		}
-+		memset(mp_ioapic_data[i], 0, size);
-+		dev = &mp_ioapic_data[i]->dev;
-+		dev->id = i; 
-+		dev->cls = &ioapic_sysdev_class;
-+		error = sysdev_register(dev);
-+		if (error) {
-+			kfree(mp_ioapic_data[i]);
-+			mp_ioapic_data[i] = NULL;
-+			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+			continue;
-+		}
-+	}
++ENTRY(invalid_TSS)
++	RING0_EC_FRAME
++	pushl $do_invalid_TSS
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
 +
-+	return 0;
-+}
++ENTRY(segment_not_present)
++	RING0_EC_FRAME
++	pushl $do_segment_not_present
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
 +
-+device_initcall(ioapic_init_sysfs);
++ENTRY(stack_segment)
++	RING0_EC_FRAME
++	pushl $do_stack_segment
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
++
++KPROBE_ENTRY(general_protection)
++	RING0_EC_FRAME
++	pushl $do_general_protection
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
++KPROBE_END(general_protection)
 +
-+/* --------------------------------------------------------------------------
-+                          ACPI-based IOAPIC Configuration
-+   -------------------------------------------------------------------------- */
++ENTRY(alignment_check)
++	RING0_EC_FRAME
++	pushl $do_alignment_check
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
 +
-+#ifdef CONFIG_ACPI
++ENTRY(divide_error)
++	RING0_INT_FRAME
++	pushl $0			# no error code
++	CFI_ADJUST_CFA_OFFSET 4
++	pushl $do_divide_error
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
 +
-+int __init io_apic_get_unique_id (int ioapic, int apic_id)
-+{
-+#ifndef CONFIG_XEN
-+	union IO_APIC_reg_00 reg_00;
-+	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
-+	physid_mask_t tmp;
-+	unsigned long flags;
-+	int i = 0;
++#ifdef CONFIG_X86_MCE
++ENTRY(machine_check)
++	RING0_INT_FRAME
++	pushl $0
++	CFI_ADJUST_CFA_OFFSET 4
++	pushl machine_check_vector
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
++#endif
 +
-+	/*
-+	 * The P4 platform supports up to 256 APIC IDs on two separate APIC 
-+	 * buses (one for LAPICs, one for IOAPICs), where its predecessors
-+	 * only support up to 16 on one shared APIC bus.
-+	 * 
-+	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
-+	 *      advantage of new APIC bus architecture.
-+	 */
++ENTRY(fixup_4gb_segment)
++	RING0_INT_FRAME
++	pushl $do_fixup_4gb_segment
++	CFI_ADJUST_CFA_OFFSET 4
++	jmp error_code
++	CFI_ENDPROC
 +
-+	if (physids_empty(apic_id_map))
-+		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
++ENTRY(kernel_thread_helper)
++	pushl $0		# fake return address for unwinder
++	CFI_STARTPROC
++	movl %edx,%eax
++	push %edx
++	CFI_ADJUST_CFA_OFFSET 4
++	call *%ebx
++	push %eax
++	CFI_ADJUST_CFA_OFFSET 4
++	call do_exit
++	CFI_ENDPROC
++ENDPROC(kernel_thread_helper)
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_00.raw = io_apic_read(ioapic, 0);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++.section .rodata,"a"
++#include "syscall_table.S"
 +
-+	if (apic_id >= get_physical_broadcast()) {
-+		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
-+			"%d\n", ioapic, apic_id, reg_00.bits.ID);
-+		apic_id = reg_00.bits.ID;
-+	}
++syscall_table_size=(.-sys_call_table)
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/fixup.c b/arch/i386/kernel/fixup.c
+--- a/arch/i386/kernel/fixup.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/fixup.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,92 @@
++/******************************************************************************
++ * fixup.c
++ * 
++ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
++ * Used to avoid repeated slow emulation of common instructions used by the
++ * user-space TLS (Thread-Local Storage) libraries.
++ * 
++ * **** NOTE ****
++ *  Issues with the binary rewriting have caused it to be removed. Instead
++ *  we rely on Xen's emulator to boot the kernel, and then print a banner
++ *  message recommending that the user disable /lib/tls.
++ * 
++ * Copyright (c) 2004, K A Fraser
++ * 
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ * 
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ */
 +
-+	/*
-+	 * Every APIC in a system must have a unique ID or we get lots of nice 
-+	 * 'stuck on smp_invalidate_needed IPI wait' messages.
-+	 */
-+	if (check_apicid_used(apic_id_map, apic_id)) {
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/version.h>
 +
-+		for (i = 0; i < get_physical_broadcast(); i++) {
-+			if (!check_apicid_used(apic_id_map, i))
-+				break;
-+		}
++#define DP(_f, _args...) printk(KERN_ALERT "  " _f "\n" , ## _args )
 +
-+		if (i == get_physical_broadcast())
-+			panic("Max apic_id exceeded!\n");
++fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
++{
++#if 0
++	static unsigned long printed = 0;
++	char info[100];
++	int i;
 +
-+		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
-+			"trying %d\n", ioapic, apic_id, i);
++	if (test_and_set_bit(0, &printed))
++		return;
 +
-+		apic_id = i;
-+	} 
++	HYPERVISOR_vm_assist(
++		VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
 +
-+	tmp = apicid_to_cpu_present(apic_id);
-+	physids_or(apic_id_map, apic_id_map, tmp);
++	sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
 +
-+	if (reg_00.bits.ID != apic_id) {
-+		reg_00.bits.ID = apic_id;
 +
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(ioapic, 0, reg_00.raw);
-+		reg_00.raw = io_apic_read(ioapic, 0);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
++	DP("");
++	DP("***************************************************************");
++	DP("***************************************************************");
++	DP("** WARNING: Currently emulating unsupported memory accesses  **");
++	DP("**          in /lib/tls glibc libraries. The emulation is    **");
++	DP("**          slow. To ensure full performance you should      **");
++	DP("**          install a 'xen-friendly' (nosegneg) version of   **");
++	DP("**          the library, or disable tls support by executing **");
++	DP("**          the following as root:                           **");
++	DP("**          mv /lib/tls /lib/tls.disabled                    **");
++	DP("** Offending process: %-38.38s **", info);
++	DP("***************************************************************");
++	DP("***************************************************************");
++	DP("");
 +
-+		/* Sanity check */
-+		if (reg_00.bits.ID != apic_id) {
-+			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
-+			return -1;
-+		}
++	for (i = 5; i > 0; i--) {
++		touch_softlockup_watchdog();
++		printk("Pausing... %d", i);
++		mdelay(1000);
++		printk("\b\b\b\b\b\b\b\b\b\b\b\b");
 +	}
 +
-+	apic_printk(APIC_VERBOSE, KERN_INFO
-+			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
-+#endif /* !CONFIG_XEN */
++	printk("Continuing...\n\n");
++#else
++	if (printk_ratelimit())
++		printk(KERN_WARNING
++		       "4gb seg fixup, process %s (pid %d), cs:ip %02x:%08lx\n",
++		       current->comm, current->tgid, regs->xcs, regs->eip);
++#endif
++}
 +
-+	return apic_id;
++static int __init fixup_init(void)
++{
++	HYPERVISOR_vm_assist(
++		VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
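++	/* Ask Xen to raise the fixup_4gb_segment trap (do_fixup_4gb_segment
++	 * above) whenever it emulates a 4GB-segment memory access. */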
++	return 0;
 +}
++__initcall(fixup_init);
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/head-xen.S b/arch/i386/kernel/head-xen.S
+--- a/arch/i386/kernel/head-xen.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/head-xen.S	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,286 @@
 +
 +
-+int __init io_apic_get_version (int ioapic)
-+{
-+	union IO_APIC_reg_01	reg_01;
-+	unsigned long flags;
++.text
++#include <linux/elfnote.h>
++#include <linux/threads.h>
++#include <linux/linkage.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/boot.h>
++#include <asm/thread_info.h>
++#include <asm/asm-offsets.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/elfnote.h>
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_01.raw = io_apic_read(ioapic, 1);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++/*
++ * References to members of the new_cpu_data structure.
++ */
 +
-+	return reg_01.bits.version;
-+}
++#define X86		new_cpu_data+CPUINFO_x86
++#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
++#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
++#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
++#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
++#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
++#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
++#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id
 +
++#define VIRT_ENTRY_OFFSET 0x0
++.org VIRT_ENTRY_OFFSET
++ENTRY(startup_32)
 +
-+int __init io_apic_get_redir_entries (int ioapic)
-+{
-+	union IO_APIC_reg_01	reg_01;
-+	unsigned long flags;
++#ifdef CONFIG_PARAVIRT
++        movl %cs, %eax
++        testl $0x3, %eax
++        jnz startup_paravirt
++#endif
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_01.raw = io_apic_read(ioapic, 1);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++	movl %esi,xen_start_info
++	cld
 +
-+	return reg_01.bits.entries;
-+}
++	call setup_pda
 +
++	/* Set up the stack pointer */
++	movl $(init_thread_union+THREAD_SIZE),%esp
 +
-+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
-+{
-+	struct IO_APIC_route_entry entry;
-+	unsigned long flags;
++	/* get vendor info */
++	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
++	XEN_CPUID
++	movl %eax,X86_CPUID		# save CPUID level
++	movl %ebx,X86_VENDOR_ID		# lo 4 chars
++	movl %edx,X86_VENDOR_ID+4	# next 4 chars
++	movl %ecx,X86_VENDOR_ID+8	# last 4 chars
 +
-+	if (!IO_APIC_IRQ(irq)) {
-+		printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
-+			ioapic);
-+		return -EINVAL;
-+	}
++	movl $1,%eax		# Use the CPUID instruction to get CPU type
++	XEN_CPUID
++	movb %al,%cl		# save reg for future use
++	andb $0x0f,%ah		# mask processor family
++	movb %ah,X86
++	andb $0xf0,%al		# mask model
++	shrb $4,%al
++	movb %al,X86_MODEL
++	andb $0x0f,%cl		# mask stepping (mask revision)
++	movb %cl,X86_MASK
++	movl %edx,X86_CAPABILITY
 +
-+	/*
-+	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
-+	 * Note that we mask (disable) IRQs now -- these get enabled when the
-+	 * corresponding device driver registers for this IRQ.
-+	 */
++	movb $1,X86_HARD_MATH
 +
-+	memset(&entry,0,sizeof(entry));
++	xorl %eax,%eax			# Clear FS and LDT
++	movl %eax,%fs
 +
-+	entry.delivery_mode = INT_DELIVERY_MODE;
-+	entry.dest_mode = INT_DEST_MODE;
-+	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+	entry.trigger = edge_level;
-+	entry.polarity = active_high_low;
-+	entry.mask  = 1;
++	movl $(__KERNEL_PDA),%eax
++	mov  %eax,%gs
 +
-+	/*
-+	 * IRQs < 16 are already in the irq_2_pin[] map
-+	 */
-+	if (irq >= 16)
-+		add_pin_to_irq(irq, ioapic, pin);
++	cld			# gcc2 wants the direction flag cleared at all times
 +
-+	entry.vector = assign_irq_vector(irq);
++	call start_kernel
++L6:
++	jmp L6			# main should never return here, but
++				# just in case, we know what happens.
 +
-+	apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
-+		"(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
-+		mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
-+		edge_level, active_high_low);
++/*
++ * Point the GDT at this CPU's PDA.  On boot these will be
++ * cpu_gdt_table and boot_pda.
++ */
++setup_pda:
++	/* get the PDA pointer */
++	movl $boot_pda, %eax
++
++	/* slot the PDA address into the GDT */
++	mov $cpu_gdt_table, %ecx
++	mov %ax, (__KERNEL_PDA+0+2)(%ecx)		/* base & 0x0000ffff */
++	shr $16, %eax
++	mov %al, (__KERNEL_PDA+4+0)(%ecx)		/* base & 0x00ff0000 */
++	mov %ah, (__KERNEL_PDA+4+3)(%ecx)		/* base & 0xff000000 */
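++	# (e.g. a hypothetical base of 0x12345678 ends up as 0x5678 in
++	# descriptor bytes 2-3, 0x34 in byte 4 and 0x12 in byte 7)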
++
++	# %esi still points to start_info, and no registers
++	# need to be preserved.
++
++	movl XEN_START_mfn_list(%esi), %ebx
++	movl $(cpu_gdt_table - __PAGE_OFFSET), %eax
++	shrl $PAGE_SHIFT, %eax
++	movl (%ebx,%eax,4), %ecx
++	pushl %ecx			# frame number for set_gdt below
 +
-+	ioapic_register_intr(irq, entry.vector, edge_level);
++	xorl %esi, %esi
++	xorl %edx, %edx
++	shldl $PAGE_SHIFT, %ecx, %edx
++	shll $PAGE_SHIFT, %ecx
++	orl $0x61, %ecx
++	movl $cpu_gdt_table, %ebx
++	movl $__HYPERVISOR_update_va_mapping, %eax
++	int $0x82
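++	# (pte flags 0x61 = present|accessed|dirty with RW clear: Xen
++	# requires the GDT page to be mapped read-only before set_gdt)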
++
++	movl $(PAGE_SIZE_asm / 8), %ecx
++	movl %esp, %ebx
++	movl $__HYPERVISOR_set_gdt, %eax
++	int $0x82
 +
-+	if (!ioapic && (irq < 16))
-+		disable_8259A_irq(irq);
++	popl %ecx
++	ret
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
-+	io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
-+	set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++#define HYPERCALL_PAGE_OFFSET 0x1000
++.org HYPERCALL_PAGE_OFFSET
++ENTRY(hypercall_page)
++.skip 0x1000
 +
-+	return 0;
-+}
++/*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
 +
-+#endif /* CONFIG_ACPI */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/ioport-xen.c linux-2.6.18-xen/arch/i386/kernel/ioport-xen.c
---- linux-2.6.18.3/arch/i386/kernel/ioport-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/ioport-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,121 @@
 +/*
-+ *	linux/arch/i386/kernel/ioport.c
-+ *
-+ * This contains the io-permission bitmap code - written by obz, with changes
-+ * by Linus.
++ * BSS section
 + */
++.section ".bss.page_aligned","w"
++ENTRY(empty_zero_page)
++	.fill 4096,1,0
 +
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/capability.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/ioport.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/thread_info.h>
-+#include <xen/interface/physdev.h>
++/*
++ * This starts the data section.
++ */
++.data
++ENTRY(start_pda)
++	.long boot_pda
 +
-+/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
-+static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
-+{
-+	unsigned long mask;
-+	unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
-+	unsigned int low_index = base & (BITS_PER_LONG-1);
-+	int length = low_index + extent;
++#ifdef CONFIG_PARAVIRT
++startup_paravirt:
++	cld
++ 	movl $(init_thread_union+THREAD_SIZE),%esp
 +
-+	if (low_index != 0) {
-+		mask = (~0UL << low_index);
-+		if (length < BITS_PER_LONG)
-+			mask &= ~(~0UL << length);
-+		if (new_value)
-+			*bitmap_base++ |= mask;
-+		else
-+			*bitmap_base++ &= ~mask;
-+		length -= BITS_PER_LONG;
-+	}
++	/* We take pains to preserve all the regs. */
++	pushl	%edx
++	pushl	%ecx
++	pushl	%eax
 +
-+	mask = (new_value ? ~0UL : 0UL);
-+	while (length >= BITS_PER_LONG) {
-+		*bitmap_base++ = mask;
-+		length -= BITS_PER_LONG;
-+	}
++	/* paravirt.o is last in link, and that probe fn never returns */
++	pushl	$__start_paravirtprobe
++1:
++	movl	0(%esp), %eax
++	pushl	(%eax)
++	movl	8(%esp), %eax
++	call	*(%esp)
++	popl	%eax
 +
-+	if (length > 0) {
-+		mask = ~(~0UL << length);
-+		if (new_value)
-+			*bitmap_base++ |= mask;
-+		else
-+			*bitmap_base++ &= ~mask;
-+	}
-+}
++	movl	4(%esp), %eax
++	movl	8(%esp), %ecx
++	movl	12(%esp), %edx
 +
++	addl	$4, (%esp)
++	jmp	1b
++#endif
 +
 +/*
-+ * this changes the io permissions bitmap in the current task.
++ * The Global Descriptor Table contains 32 quadwords, per-CPU.
 + */
-+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
-+{
-+	struct thread_struct * t = &current->thread;
-+	unsigned long *bitmap;
-+	struct physdev_set_iobitmap set_iobitmap;
++	.section .data.page_aligned, "aw"
++	.align PAGE_SIZE_asm
++ENTRY(cpu_gdt_table)
++	.quad 0x0000000000000000	/* NULL descriptor */
++	.quad 0x0000000000000000	/* 0x0b reserved */
++	.quad 0x0000000000000000	/* 0x13 reserved */
++	.quad 0x0000000000000000	/* 0x1b reserved */
++	.quad 0x0000000000000000	/* 0x20 unused */
++	.quad 0x0000000000000000	/* 0x28 unused */
++	.quad 0x0000000000000000	/* 0x33 TLS entry 1 */
++	.quad 0x0000000000000000	/* 0x3b TLS entry 2 */
++	.quad 0x0000000000000000	/* 0x43 TLS entry 3 */
++	.quad 0x0000000000000000	/* 0x4b reserved */
++	.quad 0x0000000000000000	/* 0x53 reserved */
++	.quad 0x0000000000000000	/* 0x5b reserved */
 +
-+	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
-+		return -EINVAL;
-+	if (turn_on && !capable(CAP_SYS_RAWIO))
-+		return -EPERM;
++	.quad 0x00cf9a000000ffff	/* 0x60 kernel 4GB code at 0x00000000 */
++	.quad 0x00cf92000000ffff	/* 0x68 kernel 4GB data at 0x00000000 */
++	.quad 0x00cffa000000ffff	/* 0x73 user 4GB code at 0x00000000 */
++	.quad 0x00cff2000000ffff	/* 0x7b user 4GB data at 0x00000000 */
++
++	.quad 0x0000000000000000	/* 0x80 TSS descriptor */
++	.quad 0x0000000000000000	/* 0x88 LDT descriptor */
 +
 +	/*
-+	 * If it's the first ioperm() call in this thread's lifetime, set the
-+	 * IO bitmap up. ioperm() is much less timing critical than clone(),
-+	 * this is why we delay this operation until now:
++	 * Segments used for calling PnP BIOS have byte granularity.
++	 * The code and data segments have fixed 64k limits;
++	 * the transfer segment sizes are set at run time.
 +	 */
-+	if (!t->io_bitmap_ptr) {
-+		bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+		if (!bitmap)
-+			return -ENOMEM;
-+
-+		memset(bitmap, 0xff, IO_BITMAP_BYTES);
-+		t->io_bitmap_ptr = bitmap;
-+
-+		set_iobitmap.bitmap   = (char *)bitmap;
-+		set_iobitmap.nr_ports = IO_BITMAP_BITS;
-+		HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
-+	}
++	.quad 0x0000000000000000	/* 0x90 32-bit code */
++	.quad 0x0000000000000000	/* 0x98 16-bit code */
++	.quad 0x0000000000000000	/* 0xa0 16-bit data */
++	.quad 0x0000000000000000	/* 0xa8 16-bit data */
++	.quad 0x0000000000000000	/* 0xb0 16-bit data */
 +
-+	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++	/*
++	 * The APM segments have byte granularity and their bases
++	 * are set at run time.  All have 64k limits.
++	 */
++	.quad 0x0000000000000000	/* 0xb8 APM CS    code */
++	.quad 0x0000000000000000	/* 0xc0 APM CS 16 code (16 bit) */
++	.quad 0x0000000000000000	/* 0xc8 APM DS    data */
 +
-+	return 0;
-+}
++	.quad 0x0000000000000000	/* 0xd0 - ESPFIX SS */
++	.quad 0x00cf92000000ffff	/* 0xd8 - PDA */
++	.quad 0x0000000000000000	/* 0xe0 - unused */
++	.quad 0x0000000000000000	/* 0xe8 - unused */
++	.quad 0x0000000000000000	/* 0xf0 - unused */
++	.quad 0x0000000000000000	/* 0xf8 - GDT entry 31: double-fault TSS */
++	.align PAGE_SIZE_asm
 +
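Each quadword in cpu_gdt_table is a standard i386 segment descriptor. For illustration only, a small userspace C program that unpacks the fields; 0x00cf9a000000ffff decodes to base 0, limit 0xfffff in 4K pages (i.e. 4GB), access byte 0x9a (present, DPL 0, readable code):

	#include <stdint.h>
	#include <stdio.h>

	/* decode an i386 segment descriptor quadword (illustration only) */
	static void decode_desc(uint64_t d)
	{
		uint32_t base   = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
		uint32_t limit  = (d & 0xffff) | (((d >> 48) & 0xf) << 16);
		unsigned gran   = (d >> 55) & 1;	/* G bit: limit counts 4K pages */
		unsigned access = (d >> 40) & 0xff;	/* P, DPL, S and type */

		printf("base=%#x limit=%#x%s access=%#x\n",
		       base, limit, gran ? " (4K pages)" : "", access);
	}

	int main(void)
	{
		decode_desc(0x00cf9a000000ffffULL);	/* kernel 4GB code */
		decode_desc(0x00cff2000000ffffULL);	/* user 4GB data */
		return 0;
	}
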
++#ifdef CONFIG_XEN_COMPAT_030002
 +/*
-+ * sys_iopl has to be used when you want to access the IO ports
-+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
-+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
-+ *
-+ * Here we just change the eflags value on the stack: we allow
-+ * only the super-user to do it. This depends on the stack-layout
-+ * on system-call entry - see also fork() and the signal handling
-+ * code.
++ * __xen_guest information
 + */
++.macro utoa value
++ .if (\value) < 0 || (\value) >= 0x10
++	utoa (((\value)>>4)&0x0fffffff)
++ .endif
++ .if ((\value) & 0xf) < 10
++  .byte '0' + ((\value) & 0xf)
++ .else
++  .byte 'A' + ((\value) & 0xf) - 10
++ .endif
++.endm
 +
-+asmlinkage long sys_iopl(unsigned long unused)
-+{
-+	volatile struct pt_regs * regs = (struct pt_regs *) &unused;
-+	unsigned int level = regs->ebx;
-+	struct thread_struct *t = &current->thread;
-+	unsigned int old = (t->iopl >> 12) & 3;
++.section __xen_guest
++	.ascii	"GUEST_OS=linux,GUEST_VER=2.6"
++	.ascii	",XEN_VER=xen-3.0"
++	.ascii	",VIRT_BASE=0x"
++		utoa __PAGE_OFFSET
++	.ascii	",ELF_PADDR_OFFSET=0x"
++		utoa __PAGE_OFFSET
++	.ascii	",VIRT_ENTRY=0x"
++		utoa (__PAGE_OFFSET + LOAD_PHYSICAL_ADDR + VIRT_ENTRY_OFFSET)
++	.ascii	",HYPERCALL_PAGE=0x"
++		utoa ((LOAD_PHYSICAL_ADDR+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
++	.ascii  ",FEATURES=writable_page_tables"
++	.ascii	         "|writable_descriptor_tables"
++	.ascii	         "|auto_translated_physmap"
++	.ascii	         "|pae_pgdir_above_4gb"
++	.ascii	         "|supervisor_mode_kernel"
++#ifdef CONFIG_X86_PAE
++	.ascii	",PAE=yes[extended-cr3]"
++#else
++	.ascii	",PAE=no"
++#endif
++	.ascii	",LOADER=generic"
++	.byte	0
++#endif /* CONFIG_XEN_COMPAT_030002 */
 +
-+	if (level > 3)
-+		return -EINVAL;
-+	/* Trying to gain more privileges? */
-+	if (level > old) {
-+		if (!capable(CAP_SYS_RAWIO))
-+			return -EPERM;
-+	}
-+	t->iopl = level << 12;
-+	set_iopl_mask(t->iopl);
-+	return 0;
-+}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/irq-xen.c linux-2.6.18-xen/arch/i386/kernel/irq-xen.c
---- linux-2.6.18.3/arch/i386/kernel/irq-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/irq-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,324 @@
-+/*
-+ *	linux/arch/i386/kernel/irq.c
-+ *
-+ *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
-+ *
-+ * This file contains the lowest level x86-specific interrupt
-+ * entry, irq-stacks and irq statistics code. All the remaining
-+ * irq logic is done by the generic kernel/irq/ code and
-+ * by the x86-specific irq controller code. (e.g. i8259.c and
-+ * io_apic.c.)
-+ */
++
++	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz, "linux")	
++	ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION,  .asciz, "2.6")
++	ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION,    .asciz, "xen-3.0")
++	ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE,      .long,  __PAGE_OFFSET)
++#ifdef CONFIG_XEN_COMPAT_030002
++	ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET,   .long,  __PAGE_OFFSET)
++#else
++	ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET,   .long,  0)
++#endif /* !CONFIG_XEN_COMPAT_030002 */
++	ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          .long,  startup_32)
++	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long,  hypercall_page)
++	ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,       .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
++#ifdef CONFIG_X86_PAE
++	ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE,       .asciz, "yes")
++#else
++	ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE,       .asciz, "no")
++#endif
++	ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz, "generic")
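Of the two blocks, the __xen_guest string only exists for hypervisors predating the ELF-note convention (hence the CONFIG_XEN_COMPAT_030002 guard), while the ELFNOTE entries carry the same metadata for current Xen. The utoa macro above turns an assemble-time constant into uppercase hex text by recursing on the high-order digits first; a C model of it, for illustration:

	#include <stdio.h>

	/* C model of the utoa assembler macro: high digits first */
	static void utoa(unsigned long value, FILE *out)
	{
		if (value >= 0x10)
			utoa(value >> 4, out);	/* emit higher digits first */
		fputc("0123456789ABCDEF"[value & 0xf], out);
	}
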
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/init_task-xen.c b/arch/i386/kernel/init_task-xen.c
+--- a/arch/i386/kernel/init_task-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/init_task-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,51 @@
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/init.h>
++#include <linux/init_task.h>
++#include <linux/fs.h>
++#include <linux/mqueue.h>
 +
 +#include <asm/uaccess.h>
-+#include <linux/module.h>
-+#include <linux/seq_file.h>
-+#include <linux/interrupt.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <linux/delay.h>
++#include <asm/pgtable.h>
++#include <asm/desc.h>
++
++static struct fs_struct init_fs = INIT_FS;
++static struct files_struct init_files = INIT_FILES;
++static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
++static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
++
++#define swapper_pg_dir ((pgd_t *)NULL)
++struct mm_struct init_mm = INIT_MM(init_mm);
++#undef swapper_pg_dir
 +
-+DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
-+EXPORT_PER_CPU_SYMBOL(irq_stat);
++EXPORT_SYMBOL(init_mm);
 +
-+#ifndef CONFIG_X86_LOCAL_APIC
 +/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * each architecture has to answer this themselves.
++ * Initial thread structure.
++ *
++ * We need to make sure that this is THREAD_SIZE aligned due to the
++ * way process stacks are handled. This is done by having a special
++ * "init_task" linker map entry..
 + */
-+void ack_bad_irq(unsigned int irq)
-+{
-+	printk("unexpected IRQ trap at vector %02x\n", irq);
-+}
-+#endif
++union thread_union init_thread_union 
++	__attribute__((__section__(".data.init_task"))) =
++		{ INIT_THREAD_INFO(init_task) };
 +
-+#ifdef CONFIG_4KSTACKS
 +/*
-+ * per-CPU IRQ handling contexts (thread information and stack)
++ * Initial task structure.
++ *
++ * All other task structs will be allocated on slabs in fork.c
 + */
-+union irq_ctx {
-+	struct thread_info      tinfo;
-+	u32                     stack[THREAD_SIZE/sizeof(u32)];
-+};
++struct task_struct init_task = INIT_TASK(init_task);
 +
-+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
-+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
++EXPORT_SYMBOL(init_task);
++
++#ifndef CONFIG_X86_NO_TSS
++/*
++ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
++ * no more per-task TSS's.
++ */ 
++DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
 +#endif
 +
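The THREAD_SIZE alignment of init_thread_union is what lets i386 recover the current thread_info by masking the stack pointer; a sketch of that convention follows (the helper is illustrative, not part of the file). The #define/#undef dance around swapper_pg_dir above just forces the INIT_MM initializer to a NULL pgd, since under Xen the real page directory is only known at run time.

	/* sketch: how THREAD_SIZE alignment is exploited on i386 */
	static inline struct thread_info *current_thread_info_sketch(void)
	{
		unsigned long sp;

		__asm__("movl %%esp, %0" : "=r" (sp));
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}
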
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/io_apic-xen.c b/arch/i386/kernel/io_apic-xen.c
+--- a/arch/i386/kernel/io_apic-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/io_apic-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,2973 @@
 +/*
-+ * do_IRQ handles all normal device IRQ's (the special
-+ * SMP cross-CPU interrupts have their own specific
-+ * handlers).
++ *	Intel IO-APIC support for multi-Pentium hosts.
++ *
++ *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
++ *
++ *	Many thanks to Stig Venaas for trying out countless experimental
++ *	patches and reporting/debugging problems patiently!
++ *
++ *	(c) 1999, Multiple IO-APIC support, developed by
++ *	Ken-ichi Yaku <yaku at css1.kbnes.nec.co.jp> and
++ *      Hidemi Kishimoto <kisimoto at css1.kbnes.nec.co.jp>,
++ *	further tested and cleaned up by Zach Brown <zab at redhat.com>
++ *	and Ingo Molnar <mingo at redhat.com>
++ *
++ *	Fixes
++ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
++ *					thanks to Eric Gilmore
++ *					and Rolf G. Tews
++ *					for testing these extensively
++ *	Paul Diefenbaugh	:	Added full ACPI support
 + */
-+fastcall unsigned int do_IRQ(struct pt_regs *regs)
-+{	
-+	/* high bit used in ret_from_ code */
-+	int irq = ~regs->orig_eax;
-+#ifdef CONFIG_4KSTACKS
-+	union irq_ctx *curctx, *irqctx;
-+	u32 *isp;
-+#endif
 +
-+	if (unlikely((unsigned)irq >= NR_IRQS)) {
-+		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-+					__FUNCTION__, irq);
-+		BUG();
-+	}
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/mc146818rtc.h>
++#include <linux/compiler.h>
++#include <linux/acpi.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++#include <linux/pci.h>
++#include <linux/msi.h>
++#include <linux/htirq.h>
++#include <linux/freezer.h>
 +
-+	irq_enter();
-+#ifdef CONFIG_DEBUG_STACKOVERFLOW
-+	/* Debugging check for stack overflow: is there less than 1KB free? */
-+	{
-+		long esp;
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/desc.h>
++#include <asm/timer.h>
++#include <asm/i8259.h>
++#include <asm/nmi.h>
++#include <asm/msidef.h>
++#include <asm/hypertransport.h>
 +
-+		__asm__ __volatile__("andl %%esp,%0" :
-+					"=r" (esp) : "0" (THREAD_SIZE - 1));
-+		if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
-+			printk("do_IRQ: stack overflow: %ld\n",
-+				esp - sizeof(struct thread_info));
-+			dump_stack();
-+		}
-+	}
-+#endif
++#include <mach_apic.h>
++#include <mach_apicdef.h>
 +
-+#ifdef CONFIG_4KSTACKS
++#include "io_ports.h"
 +
-+	curctx = (union irq_ctx *) current_thread_info();
-+	irqctx = hardirq_ctx[smp_processor_id()];
++#ifdef CONFIG_XEN
 +
-+	/*
-+	 * this is where we switch to the IRQ stack. However, if we are
-+	 * already using the IRQ stack (because we interrupted a hardirq
-+	 * handler) we can't do that and just have to keep using the
-+	 * current stack (which is the irq stack already after all)
-+	 */
-+	if (curctx != irqctx) {
-+		int arg1, arg2, ebx;
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
 +
-+		/* build the stack frame on the IRQ stack */
-+		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-+		irqctx->tinfo.task = curctx->tinfo.task;
-+		irqctx->tinfo.previous_esp = current_stack_pointer;
++/* Fake i8259 */
++#define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq)  ((void)0)
++#define i8259A_irq_pending(_irq) (0)
 +
-+		/*
-+		 * Copy the softirq bits in preempt_count so that the
-+		 * softirq checks work in the hardirq context.
-+		 */
-+		irqctx->tinfo.preempt_count =
-+			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-+			 (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
-+
-+		asm volatile(
-+			"       xchgl   %%ebx,%%esp      \n"
-+			"       call    __do_IRQ         \n"
-+			"       movl   %%ebx,%%esp      \n"
-+			: "=a" (arg1), "=d" (arg2), "=b" (ebx)
-+			:  "0" (irq),   "1" (regs),  "2" (isp)
-+			: "memory", "cc", "ecx"
-+		);
-+	} else
-+#endif
-+		__do_IRQ(irq, regs);
++unsigned long io_apic_irqs;
 +
-+	irq_exit();
++static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++{
++	struct physdev_apic apic_op;
++	int ret;
 +
-+	return 1;
++	apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++	apic_op.reg = reg;
++	ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++	if (ret)
++		return ret;
++	return apic_op.value;
++}
++
++static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++	struct physdev_apic apic_op;
++
++	apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++	apic_op.reg = reg;
++	apic_op.value = value;
++	HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
 +}
 +
-+#ifdef CONFIG_4KSTACKS
++#define io_apic_read(a,r)    xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#endif /* CONFIG_XEN */
++
++int (*ioapic_renumber_irq)(int ioapic, int irq);
++atomic_t irq_mis_count;
++
++/* Where (if anywhere) the i8259 is connected in external int mode */
++static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
++
++static DEFINE_SPINLOCK(ioapic_lock);
++static DEFINE_SPINLOCK(vector_lock);
++
++int timer_over_8254 __initdata = 1;
 +
 +/*
-+ * These should really be __section__(".bss.page_aligned") as well, but
-+ * gcc's 3.0 and earlier don't handle that correctly.
++ *	Is the SiS APIC rmw bug present?
++ *	-1 = don't know, 0 = no, 1 = yes
 + */
-+static char softirq_stack[NR_CPUS * THREAD_SIZE]
-+		__attribute__((__aligned__(THREAD_SIZE)));
-+
-+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-+		__attribute__((__aligned__(THREAD_SIZE)));
++int sis_apic_bug = -1;
 +
 +/*
-+ * allocate per-cpu stacks for hardirq and for softirq processing
++ * # of IRQ routing registers
 + */
-+void irq_ctx_init(int cpu)
-+{
-+	union irq_ctx *irqctx;
++int nr_ioapic_registers[MAX_IO_APICS];
 +
-+	if (hardirq_ctx[cpu])
-+		return;
++static int disable_timer_pin_1 __initdata;
 +
-+	irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
-+	irqctx->tinfo.task              = NULL;
-+	irqctx->tinfo.exec_domain       = NULL;
-+	irqctx->tinfo.cpu               = cpu;
-+	irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
-+	irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
++/*
++ * Rough estimate of how many shared IRQs there are; it can
++ * be changed at any time.
++ */
++#define MAX_PLUS_SHARED_IRQS NR_IRQS
++#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
 +
-+	hardirq_ctx[cpu] = irqctx;
++/*
++ * This is performance-critical; we want to do it in O(1).
++ *
++ * The indexing order of this array favors 1:1 mappings
++ * between pins and IRQs.
++ */
 +
-+	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
-+	irqctx->tinfo.task              = NULL;
-+	irqctx->tinfo.exec_domain       = NULL;
-+	irqctx->tinfo.cpu               = cpu;
-+	irqctx->tinfo.preempt_count     = 0;
-+	irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
++static struct irq_pin_list {
++	int apic, pin, next;
++} irq_2_pin[PIN_MAP_SIZE];
 +
-+	softirq_ctx[cpu] = irqctx;
++#ifndef CONFIG_XEN
++struct io_apic {
++	unsigned int index;
++	unsigned int unused[3];
++	unsigned int data;
++};
 +
-+	printk("CPU %u irqstacks, hard=%p soft=%p\n",
-+		cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
++static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
++{
++	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
++		+ (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
 +}
 +
-+void irq_ctx_exit(int cpu)
++static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
 +{
-+	hardirq_ctx[cpu] = NULL;
++	struct io_apic __iomem *io_apic = io_apic_base(apic);
++	writel(reg, &io_apic->index);
++	return readl(&io_apic->data);
 +}
 +
-+extern asmlinkage void __do_softirq(void);
-+
-+asmlinkage void do_softirq(void)
++static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
 +{
-+	unsigned long flags;
-+	struct thread_info *curctx;
-+	union irq_ctx *irqctx;
-+	u32 *isp;
-+
-+	if (in_interrupt())
-+		return;
++	struct io_apic __iomem *io_apic = io_apic_base(apic);
++	writel(reg, &io_apic->index);
++	writel(value, &io_apic->data);
++}
 +
-+	local_irq_save(flags);
++/*
++ * Re-write a value: to be used for read-modify-write
++ * cycles where the read already set up the index register.
++ *
++ * The older SiS APIC requires that we rewrite the index register.
++ */
++static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
++{
++	volatile struct io_apic *io_apic = io_apic_base(apic);
++	if (sis_apic_bug)
++		writel(reg, &io_apic->index);
++	writel(value, &io_apic->data);
++}
++#endif /* !CONFIG_XEN */
 +
-+	if (local_softirq_pending()) {
-+		curctx = current_thread_info();
-+		irqctx = softirq_ctx[smp_processor_id()];
-+		irqctx->tinfo.task = curctx->task;
-+		irqctx->tinfo.previous_esp = current_stack_pointer;
-+
-+		/* build the stack frame on the softirq stack */
-+		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-+
-+		asm volatile(
-+			"       xchgl   %%ebx,%%esp     \n"
-+			"       call    __do_softirq    \n"
-+			"       movl    %%ebx,%%esp     \n"
-+			: "=b"(isp)
-+			: "0"(isp)
-+			: "memory", "cc", "edx", "ecx", "eax"
-+		);
-+		/*
-+		 * Shouldnt happen, we returned above if in_interrupt():
-+	 	 */
-+		WARN_ON_ONCE(softirq_count());
-+	}
++union entry_union {
++	struct { u32 w1, w2; };
++	struct IO_APIC_route_entry entry;
++};
 +
-+	local_irq_restore(flags);
++static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
++{
++	union entry_union eu;
++	unsigned long flags;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
++	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++	return eu.entry;
 +}
 +
-+EXPORT_SYMBOL(do_softirq);
-+#endif
-+
 +/*
-+ * Interrupt statistics:
++ * When we write a new IO APIC routing entry, we need to write the high
++ * word first! If the mask bit in the low word is clear, we will enable
++ * the interrupt, and we need to make sure the entry is fully populated
++ * before that happens.
 + */
++static void
++__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
++{
++	union entry_union eu;
++	eu.entry = e;
++	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
++	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
++}
 +
-+atomic_t irq_err_count;
++static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
++{
++	unsigned long flags;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__ioapic_write_entry(apic, pin, e);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
 +
 +/*
-+ * /proc/interrupts printing:
++ * When we mask an IO APIC routing entry, we need to write the low
++ * word first, in order to set the mask bit before we change the
++ * high bits!
 + */
 +
-+int show_interrupts(struct seq_file *p, void *v)
++#ifndef CONFIG_XEN
++static void ioapic_mask_entry(int apic, int pin)
 +{
-+	int i = *(loff_t *) v, j;
-+	struct irqaction * action;
 +	unsigned long flags;
++	union entry_union eu = { .entry.mask = 1 };
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
++	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++#endif
++
++/*
++ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
++ * shared ISA-space IRQs, so we have to support them. We are super
++ * fast in the common case, and fast for shared ISA-space IRQs.
++ */
++static void add_pin_to_irq(unsigned int irq, int apic, int pin)
++{
++	static int first_free_entry = NR_IRQS;
++	struct irq_pin_list *entry = irq_2_pin + irq;
++
++	while (entry->next)
++		entry = irq_2_pin + entry->next;
 +
-+	if (i == 0) {
-+		seq_printf(p, "           ");
-+		for_each_online_cpu(j)
-+			seq_printf(p, "CPU%-8d       ",j);
-+		seq_putc(p, '\n');
++	if (entry->pin != -1) {
++		entry->next = first_free_entry;
++		entry = irq_2_pin + entry->next;
++		if (++first_free_entry >= PIN_MAP_SIZE)
++			panic("io_apic.c: whoops");
 +	}
++	entry->apic = apic;
++	entry->pin = pin;
++}
 +
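irq_2_pin is a linked list threaded through an array: the first NR_IRQS slots are per-IRQ heads, and add_pin_to_irq hands out the overflow slots above NR_IRQS, chained through the next index, whenever one IRQ feeds more than one pin. Every consumer of the table walks it the same way; an illustrative helper (hypothetical, mirroring the loops used below):

	/* illustration: visit each (apic, pin) recorded for one IRQ */
	static void for_each_irq_pin_sketch(unsigned int irq,
					    void (*fn)(int apic, int pin))
	{
		struct irq_pin_list *entry = irq_2_pin + irq;

		for (;;) {
			if (entry->pin == -1)
				break;		/* unused head slot */
			fn(entry->apic, entry->pin);
			if (!entry->next)
				break;		/* a next of 0 ends the chain */
			entry = irq_2_pin + entry->next;
		}
	}
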
-+	if (i < NR_IRQS) {
-+		spin_lock_irqsave(&irq_desc[i].lock, flags);
-+		action = irq_desc[i].action;
-+		if (!action)
-+			goto skip;
-+		seq_printf(p, "%3d: ",i);
-+#ifndef CONFIG_SMP
-+		seq_printf(p, "%10u ", kstat_irqs(i));
++#ifdef CONFIG_XEN
++#define clear_IO_APIC() ((void)0)
 +#else
-+		for_each_online_cpu(j)
-+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-+#endif
-+		seq_printf(p, " %14s", irq_desc[i].chip->typename);
-+		seq_printf(p, "  %s", action->name);
++/*
++ * Reroute an IRQ to a different pin.
++ */
++static void __init replace_pin_at_irq(unsigned int irq,
++				      int oldapic, int oldpin,
++				      int newapic, int newpin)
++{
++	struct irq_pin_list *entry = irq_2_pin + irq;
 +
-+		for (action=action->next; action; action = action->next)
-+			seq_printf(p, ", %s", action->name);
-+
-+		seq_putc(p, '\n');
-+skip:
-+		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-+	} else if (i == NR_IRQS) {
-+		seq_printf(p, "NMI: ");
-+		for_each_online_cpu(j)
-+			seq_printf(p, "%10u ", nmi_count(j));
-+		seq_putc(p, '\n');
-+#ifdef CONFIG_X86_LOCAL_APIC
-+		seq_printf(p, "LOC: ");
-+		for_each_online_cpu(j)
-+			seq_printf(p, "%10u ",
-+				per_cpu(irq_stat,j).apic_timer_irqs);
-+		seq_putc(p, '\n');
-+#endif
-+		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-+#if defined(CONFIG_X86_IO_APIC)
-+		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-+#endif
++	while (1) {
++		if (entry->apic == oldapic && entry->pin == oldpin) {
++			entry->apic = newapic;
++			entry->pin = newpin;
++		}
++		if (!entry->next)
++			break;
++		entry = irq_2_pin + entry->next;
 +	}
-+	return 0;
 +}
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+void fixup_irqs(cpumask_t map)
++static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
 +{
-+	unsigned int irq;
-+	static int warned;
-+
-+	for (irq = 0; irq < NR_IRQS; irq++) {
-+		cpumask_t mask;
-+		if (irq == 2)
-+			continue;
++	struct irq_pin_list *entry = irq_2_pin + irq;
++	unsigned int pin, reg;
 +
-+		cpus_and(mask, irq_desc[irq].affinity, map);
-+		if (any_online_cpu(mask) == NR_CPUS) {
-+			/*printk("Breaking affinity for irq %i\n", irq);*/
-+			mask = map;
-+		}
-+		if (irq_desc[irq].chip->set_affinity)
-+			irq_desc[irq].chip->set_affinity(irq, mask);
-+		else if (irq_desc[irq].action && !(warned++))
-+			printk("Cannot set affinity for irq %i\n", irq);
++	for (;;) {
++		pin = entry->pin;
++		if (pin == -1)
++			break;
++		reg = io_apic_read(entry->apic, 0x10 + pin*2);
++		reg &= ~disable;
++		reg |= enable;
++		io_apic_modify(entry->apic, 0x10 + pin*2, reg);
++		if (!entry->next)
++			break;
++		entry = irq_2_pin + entry->next;
 +	}
-+
-+#if 0
-+	barrier();
-+	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
-+	   [note the nop - the interrupt-enable boundary on x86 is two
-+	   instructions from sti] - to flush out pending hardirqs and
-+	   IPIs. After this point nothing is supposed to reach this CPU." */
-+	__asm__ __volatile__("sti; nop; cli");
-+	barrier();
-+#else
-+	/* That doesn't seem sufficient.  Give it 1ms. */
-+	local_irq_enable();
-+	mdelay(1);
-+	local_irq_disable();
-+#endif
 +}
-+#endif
-+
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/ldt-xen.c linux-2.6.18-xen/arch/i386/kernel/ldt-xen.c
---- linux-2.6.18.3/arch/i386/kernel/ldt-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/ldt-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,270 @@
-+/*
-+ * linux/kernel/ldt.c
-+ *
-+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
-+ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
-+ */
 +
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/slab.h>
++/* mask = 1 */
++static void __mask_IO_APIC_irq (unsigned int irq)
++{
++	__modify_IO_APIC_irq(irq, 0x00010000, 0);
++}
 +
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/ldt.h>
-+#include <asm/desc.h>
-+#include <asm/mmu_context.h>
++/* mask = 0 */
++static void __unmask_IO_APIC_irq (unsigned int irq)
++{
++	__modify_IO_APIC_irq(irq, 0, 0x00010000);
++}
 +
-+#ifdef CONFIG_SMP /* avoids "defined but not used" warnig */
-+static void flush_ldt(void *null)
++/* mask = 1, trigger = 0 */
++static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
 +{
-+	if (current->active_mm)
-+		load_LDT(&current->active_mm->context);
++	__modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
 +}
-+#endif
 +
-+static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
++/* mask = 0, trigger = 1 */
++static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
 +{
-+	void *oldldt;
-+	void *newldt;
-+	int oldsize;
++	__modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
++}
 +
-+	if (mincount <= pc->size)
-+		return 0;
-+	oldsize = pc->size;
-+	mincount = (mincount+511)&(~511);
-+	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
-+		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
-+	else
-+		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++static void mask_IO_APIC_irq (unsigned int irq)
++{
++	unsigned long flags;
 +
-+	if (!newldt)
-+		return -ENOMEM;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__mask_IO_APIC_irq(irq);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
 +
-+	if (oldsize)
-+		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
-+	oldldt = pc->ldt;
-+	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
-+	pc->ldt = newldt;
-+	wmb();
-+	pc->size = mincount;
-+	wmb();
++static void unmask_IO_APIC_irq (unsigned int irq)
++{
++	unsigned long flags;
 +
-+	if (reload) {
-+#ifdef CONFIG_SMP
-+		cpumask_t mask;
-+		preempt_disable();
-+#endif
-+		make_pages_readonly(
-+			pc->ldt,
-+			(pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+			XENFEAT_writable_descriptor_tables);
-+		load_LDT(pc);
-+#ifdef CONFIG_SMP
-+		mask = cpumask_of_cpu(smp_processor_id());
-+		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-+			smp_call_function(flush_ldt, NULL, 1, 1);
-+		preempt_enable();
-+#endif
-+	}
-+	if (oldsize) {
-+		make_pages_writable(
-+			oldldt,
-+			(oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+			XENFEAT_writable_descriptor_tables);
-+		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
-+			vfree(oldldt);
-+		else
-+			kfree(oldldt);
-+	}
-+	return 0;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__unmask_IO_APIC_irq(irq);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
 +}
 +
-+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
 +{
-+	int err = alloc_ldt(new, old->size, 0);
-+	if (err < 0)
-+		return err;
-+	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
-+	make_pages_readonly(
-+		new->ldt,
-+		(new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+		XENFEAT_writable_descriptor_tables);
-+	return 0;
++	struct IO_APIC_route_entry entry;
++	
++	/* Check delivery_mode to be sure we're not clearing an SMI pin */
++	entry = ioapic_read_entry(apic, pin);
++	if (entry.delivery_mode == dest_SMI)
++		return;
++
++	/*
++	 * Disable it in the IO-APIC irq-routing table:
++	 */
++	ioapic_mask_entry(apic, pin);
 +}
 +
-+/*
-+ * we do not have to muck with descriptors here, that is
-+ * done in switch_mm() as needed.
-+ */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++static void clear_IO_APIC (void)
 +{
-+	struct mm_struct * old_mm;
-+	int retval = 0;
++	int apic, pin;
 +
-+	init_MUTEX(&mm->context.sem);
-+	mm->context.size = 0;
-+	mm->context.has_foreign_mappings = 0;
-+	old_mm = current->mm;
-+	if (old_mm && old_mm->context.size > 0) {
-+		down(&old_mm->context.sem);
-+		retval = copy_ldt(&mm->context, &old_mm->context);
-+		up(&old_mm->context.sem);
-+	}
-+	return retval;
++	for (apic = 0; apic < nr_ioapics; apic++)
++		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
++			clear_IO_APIC_pin(apic, pin);
 +}
 +
-+/*
-+ * No need to lock the MM as we are the last user
-+ */
-+void destroy_context(struct mm_struct *mm)
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
 +{
-+	if (mm->context.size) {
-+		if (mm == current->active_mm)
-+			clear_LDT();
-+		make_pages_writable(
-+			mm->context.ldt,
-+			(mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+			XENFEAT_writable_descriptor_tables);
-+		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
-+			vfree(mm->context.ldt);
-+		else
-+			kfree(mm->context.ldt);
-+		mm->context.size = 0;
++	unsigned long flags;
++	int pin;
++	struct irq_pin_list *entry = irq_2_pin + irq;
++	unsigned int apicid_value;
++	cpumask_t tmp;
++	
++	cpus_and(tmp, cpumask, cpu_online_map);
++	if (cpus_empty(tmp))
++		tmp = TARGET_CPUS;
++
++	cpus_and(cpumask, tmp, CPU_MASK_ALL);
++
++	apicid_value = cpu_mask_to_apicid(cpumask);
++	/* Prepare to do the io_apic_write */
++	apicid_value = apicid_value << 24;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	for (;;) {
++		pin = entry->pin;
++		if (pin == -1)
++			break;
++		io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
++		if (!entry->next)
++			break;
++		entry = irq_2_pin + entry->next;
 +	}
++	set_native_irq_info(irq, cpumask);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
 +}
 +
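set_ioapic_affinity_irq works because the destination field of an I/O APIC route entry occupies bits 56-63, i.e. the top byte of the odd (high) register at index 0x11 + 2*pin, spelled 0x10 + 1 + pin*2 above. That is the whole reason for the shift by 24: e.g. a cpu_mask_to_apicid() result of 0x03 (hypothetical value) becomes 0x03000000, landing in bits 24-31 of the high word and therefore bits 56-63 of the entry.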
-+static int read_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+	int err;
-+	unsigned long size;
-+	struct mm_struct * mm = current->mm;
++#if defined(CONFIG_IRQBALANCE)
++# include <asm/processor.h>	/* kernel_thread() */
++# include <linux/kernel_stat.h>	/* kstat */
++# include <linux/slab.h>		/* kmalloc() */
++# include <linux/timer.h>	/* time_after() */
++ 
++#ifdef CONFIG_BALANCED_IRQ_DEBUG
++#  define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
++#  define Dprintk(x...) do { TDprintk(x); } while (0)
++# else
++#  define TDprintk(x...) 
++#  define Dprintk(x...) 
++# endif
++
++#define IRQBALANCE_CHECK_ARCH -999
++#define MAX_BALANCED_IRQ_INTERVAL	(5*HZ)
++#define MIN_BALANCED_IRQ_INTERVAL	(HZ/2)
++#define BALANCED_IRQ_MORE_DELTA		(HZ/10)
++#define BALANCED_IRQ_LESS_DELTA		(HZ)
 +
-+	if (!mm->context.size)
-+		return 0;
-+	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
-+		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
++static int physical_balance __read_mostly;
++static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
 +
-+	down(&mm->context.sem);
-+	size = mm->context.size*LDT_ENTRY_SIZE;
-+	if (size > bytecount)
-+		size = bytecount;
++static struct irq_cpu_info {
++	unsigned long * last_irq;
++	unsigned long * irq_delta;
++	unsigned long irq;
++} irq_cpu_data[NR_CPUS];
 +
-+	err = 0;
-+	if (copy_to_user(ptr, mm->context.ldt, size))
-+		err = -EFAULT;
-+	up(&mm->context.sem);
-+	if (err < 0)
-+		goto error_return;
-+	if (size != bytecount) {
-+		/* zero-fill the rest */
-+		if (clear_user(ptr+size, bytecount-size) != 0) {
-+			err = -EFAULT;
-+			goto error_return;
-+		}
-+	}
-+	return bytecount;
-+error_return:
-+	return err;
-+}
++#define CPU_IRQ(cpu)		(irq_cpu_data[cpu].irq)
++#define LAST_CPU_IRQ(cpu,irq)   (irq_cpu_data[cpu].last_irq[irq])
++#define IRQ_DELTA(cpu,irq) 	(irq_cpu_data[cpu].irq_delta[irq])
 +
-+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+	int err;
-+	unsigned long size;
-+	void *address;
++#define IDLE_ENOUGH(cpu,now) \
++	(idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
 +
-+	err = 0;
-+	address = &default_ldt[0];
-+	size = 5*sizeof(struct desc_struct);
-+	if (size > bytecount)
-+		size = bytecount;
++#define IRQ_ALLOWED(cpu, allowed_mask)	cpu_isset(cpu, allowed_mask)
 +
-+	err = size;
-+	if (copy_to_user(ptr, address, size))
-+		err = -EFAULT;
++#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
 +
-+	return err;
-+}
++static cpumask_t balance_irq_affinity[NR_IRQS] = {
++	[0 ... NR_IRQS-1] = CPU_MASK_ALL
++};
 +
-+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
 +{
-+	struct mm_struct * mm = current->mm;
-+	__u32 entry_1, entry_2;
-+	int error;
-+	struct user_desc ldt_info;
-+
-+	error = -EINVAL;
-+	if (bytecount != sizeof(ldt_info))
-+		goto out;
-+	error = -EFAULT; 	
-+	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
-+		goto out;
++	balance_irq_affinity[irq] = mask;
++}
 +
-+	error = -EINVAL;
-+	if (ldt_info.entry_number >= LDT_ENTRIES)
-+		goto out;
-+	if (ldt_info.contents == 3) {
-+		if (oldmode)
-+			goto out;
-+		if (ldt_info.seg_not_present == 0)
-+			goto out;
-+	}
++static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
++			unsigned long now, int direction)
++{
++	int search_idle = 1;
++	int cpu = curr_cpu;
 +
-+	down(&mm->context.sem);
-+	if (ldt_info.entry_number >= mm->context.size) {
-+		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
-+		if (error < 0)
-+			goto out_unlock;
-+	}
++	goto inside;
 +
-+   	/* Allow LDTs to be cleared by the user. */
-+   	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-+		if (oldmode || LDT_empty(&ldt_info)) {
-+			entry_1 = 0;
-+			entry_2 = 0;
-+			goto install;
++	do {
++		if (unlikely(cpu == curr_cpu))
++			search_idle = 0;
++inside:
++		if (direction == 1) {
++			cpu++;
++			if (cpu >= NR_CPUS)
++				cpu = 0;
++		} else {
++			cpu--;
++			if (cpu == -1)
++				cpu = NR_CPUS-1;
 +		}
-+	}
++	} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
++			(search_idle && !IDLE_ENOUGH(cpu,now)));
 +
-+	entry_1 = LDT_entry_a(&ldt_info);
-+	entry_2 = LDT_entry_b(&ldt_info);
-+	if (oldmode)
-+		entry_2 &= ~(1 << 20);
++	return cpu;
++}
 +
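The goto pair in move() implements a circular scan: step through the CPUs in the given direction, first insisting on one that is online, allowed and idle, and dropping the idleness requirement once the scan has wrapped past the starting CPU. A roughly equivalent goto-free sketch for the direction == 1 case (behaviour at the wrap point differs by one step; sketch only):

	static int move_sketch(int curr_cpu, cpumask_t allowed_mask,
			       unsigned long now)
	{
		int cpu = curr_cpu;
		int search_idle = 1;

		do {
			if (++cpu >= NR_CPUS)
				cpu = 0;
			if (cpu == curr_cpu)
				search_idle = 0;	/* wrapped: accept busy CPUs */
		} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu, allowed_mask) ||
			 (search_idle && !IDLE_ENOUGH(cpu, now)));

		return cpu;
	}
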
-+	/* Install the new entry ...  */
-+install:
-+	error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number,
-+				entry_1, entry_2);
++static inline void balance_irq(int cpu, int irq)
++{
++	unsigned long now = jiffies;
++	cpumask_t allowed_mask;
++	unsigned int new_cpu;
++		
++	if (irqbalance_disabled)
++		return; 
 +
-+out_unlock:
-+	up(&mm->context.sem);
-+out:
-+	return error;
++	cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
++	new_cpu = move(cpu, allowed_mask, now, 1);
++	if (cpu != new_cpu) {
++		set_pending_irq(irq, cpumask_of_cpu(new_cpu));
++	}
 +}
 +
-+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 +{
-+	int ret = -ENOSYS;
-+
-+	switch (func) {
-+	case 0:
-+		ret = read_ldt(ptr, bytecount);
-+		break;
-+	case 1:
-+		ret = write_ldt(ptr, bytecount, 1);
-+		break;
-+	case 2:
-+		ret = read_default_ldt(ptr, bytecount);
-+		break;
-+	case 0x11:
-+		ret = write_ldt(ptr, bytecount, 0);
-+		break;
++	int i, j;
++	Dprintk("Rotating IRQs among CPUs.\n");
++	for_each_online_cpu(i) {
++		for (j = 0; j < NR_IRQS; j++) {
++			if (!irq_desc[j].action)
++				continue;
++			/* Is it a significant load? */
++			if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
++						useful_load_threshold)
++				continue;
++			balance_irq(i, j);
++		}
 +	}
-+	return ret;
++	balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++		balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);	
++	return;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/Makefile linux-2.6.18-xen/arch/i386/kernel/Makefile
---- linux-2.6.18.3/arch/i386/kernel/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/kernel/Makefile	2006-11-19 14:26:21.000000000 +0100
-@@ -44,6 +44,12 @@
- 
- obj-$(CONFIG_SCx200)		+= scx200.o
- 
-+ifdef CONFIG_XEN
-+vsyscall_note := vsyscall-note-xen.o
-+else
-+vsyscall_note := vsyscall-note.o
-+endif
 +
- # vsyscall.o contains the vsyscall DSO images as __initdata.
- # We must build both images before we can assemble it.
- # Note: kbuild does not track this dependency due to usage of .incbin
-@@ -65,7 +71,7 @@
- 
- $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
- $(obj)/vsyscall-%.so: $(src)/vsyscall.lds \
--		      $(obj)/vsyscall-%.o $(obj)/vsyscall-note.o FORCE
-+		      $(obj)/vsyscall-%.o $(obj)/$(vsyscall_note) FORCE
- 	$(call if_changed,syscall)
- 
- # We also create a special relocatable object that should mirror the symbol
-@@ -77,8 +83,19 @@
- 
- SYSCFLAGS_vsyscall-syms.o = -r
- $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
--			$(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
-+			$(obj)/vsyscall-sysenter.o $(obj)/$(vsyscall_note) FORCE
- 	$(call if_changed,syscall)
- 
- k8-y                      += ../../x86_64/kernel/k8.o
- 
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
++static void do_irq_balance(void)
++{
++	int i, j;
++	unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
++	unsigned long move_this_load = 0;
++	int max_loaded = 0, min_loaded = 0;
++	int load;
++	unsigned long useful_load_threshold = balanced_irq_interval + 10;
++	int selected_irq;
++	int tmp_loaded, first_attempt = 1;
++	unsigned long tmp_cpu_irq;
++	unsigned long imbalance = 0;
++	cpumask_t allowed_mask, target_cpu_mask, tmp;
 +
-+obj-y += fixup.o
-+microcode-$(subst m,y,$(CONFIG_MICROCODE)) := microcode-xen.o
-+n-obj-xen := i8259.o timers/ reboot.o smpboot.o trampoline.o
++	for_each_possible_cpu(i) {
++		int package_index;
++		CPU_IRQ(i) = 0;
++		if (!cpu_online(i))
++			continue;
++		package_index = CPU_TO_PACKAGEINDEX(i);
++		for (j = 0; j < NR_IRQS; j++) {
++			unsigned long value_now, delta;
++			/* Is this an active IRQ? */
++			if (!irq_desc[j].action)
++				continue;
++			if ( package_index == i )
++				IRQ_DELTA(package_index,j) = 0;
++			/* Determine the total count per processor per IRQ */
++			value_now = (unsigned long) kstat_cpu(i).irqs[j];
 +
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y))
-+extra-y := $(call cherrypickxen, $(extra-y))
-+endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/microcode-xen.c linux-2.6.18-xen/arch/i386/kernel/microcode-xen.c
---- linux-2.6.18.3/arch/i386/kernel/microcode-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/microcode-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,147 @@
-+/*
-+ *	Intel CPU Microcode Update Driver for Linux
-+ *
-+ *	Copyright (C) 2000-2004 Tigran Aivazian
-+ *
-+ *	This driver allows to upgrade microcode on Intel processors
-+ *	belonging to IA-32 family - PentiumPro, Pentium II, 
-+ *	Pentium III, Xeon, Pentium 4, etc.
-+ *
-+ *	Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual, 
-+ *	Order Number 245472 or free download from:
-+ *		
-+ *	http://developer.intel.com/design/pentium4/manuals/245472.htm
-+ *
-+ *	For more information, go to http://www.urbanmyth.org/microcode
-+ *
-+ *	This program is free software; you can redistribute it and/or
-+ *	modify it under the terms of the GNU General Public License
-+ *	as published by the Free Software Foundation; either version
-+ *	2 of the License, or (at your option) any later version.
-+ */
++			/* Determine the activity per processor per IRQ */
++			delta = value_now - LAST_CPU_IRQ(i,j);
 +
-+//#define DEBUG /* pr_debug */
-+#include <linux/capability.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/sched.h>
-+#include <linux/cpumask.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/miscdevice.h>
-+#include <linux/spinlock.h>
-+#include <linux/mm.h>
-+#include <linux/mutex.h>
-+#include <linux/syscalls.h>
++			/* Update last_cpu_irq[][] for the next time */
++			LAST_CPU_IRQ(i,j) = value_now;
 +
-+#include <asm/msr.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
++			/* Ignore IRQs whose rate is less than the clock */
++			if (delta < useful_load_threshold)
++				continue;
++			/* update the load for the processor or package total */
++			IRQ_DELTA(package_index,j) += delta;
 +
-+MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
-+MODULE_AUTHOR("Tigran Aivazian <tigran at veritas.com>");
-+MODULE_LICENSE("GPL");
++			/* Keep track of the higher numbered sibling as well */
++			if (i != package_index)
++				CPU_IRQ(i) += delta;
++			/*
++			 * We have sibling A and sibling B in the package
++			 *
++			 * cpu_irq[A] = load for cpu A + load for cpu B
++			 * cpu_irq[B] = load for cpu B
++			 */
++			CPU_IRQ(package_index) += delta;
++		}
++	}
++	/* Find the least loaded processor package */
++	for_each_online_cpu(i) {
++		if (i != CPU_TO_PACKAGEINDEX(i))
++			continue;
++		if (min_cpu_irq > CPU_IRQ(i)) {
++			min_cpu_irq = CPU_IRQ(i);
++			min_loaded = i;
++		}
++	}
++	max_cpu_irq = ULONG_MAX;
 +
-+#define MICROCODE_VERSION 	"1.14-xen"
++tryanothercpu:
++	/* Look for the heaviest-loaded processor.
++	 * We may come back to get the next-heaviest one.
++	 * Skip processors with trivial loads.
++	 */
++	tmp_cpu_irq = 0;
++	tmp_loaded = -1;
++	for_each_online_cpu(i) {
++		if (i != CPU_TO_PACKAGEINDEX(i))
++			continue;
++		if (max_cpu_irq <= CPU_IRQ(i)) 
++			continue;
++		if (tmp_cpu_irq < CPU_IRQ(i)) {
++			tmp_cpu_irq = CPU_IRQ(i);
++			tmp_loaded = i;
++		}
++	}
 +
-+#define DEFAULT_UCODE_DATASIZE 	(2000) 	  /* 2000 bytes */
-+#define MC_HEADER_SIZE		(sizeof (microcode_header_t))  	  /* 48 bytes */
-+#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
++	if (tmp_loaded == -1) {
++		/* With a small number of heavy interrupt sources, some
++		 * of the cpus end up loaded too much. We use Ingo's
++		 * original approach to rotate them around.
++		 */
++		if (!first_attempt && imbalance >= useful_load_threshold) {
++			rotate_irqs_among_cpus(useful_load_threshold);
++			return;
++		}
++		goto not_worth_the_effort;
++	}
++	
++	first_attempt = 0;		/* heaviest search */
++	max_cpu_irq = tmp_cpu_irq;	/* load */
++	max_loaded = tmp_loaded;	/* processor */
++	imbalance = (max_cpu_irq - min_cpu_irq) / 2;
++	
++	Dprintk("max_loaded cpu = %d\n", max_loaded);
++	Dprintk("min_loaded cpu = %d\n", min_loaded);
++	Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
++	Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
++	Dprintk("load imbalance = %lu\n", imbalance);
 +
-+/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
-+static DEFINE_MUTEX(microcode_mutex);
++	/* If the imbalance is less than 1/8 of the max load
++	 * (max_cpu_irq >> 3), we hit diminishing returns - quit.
++	 */
++	if (imbalance < (max_cpu_irq >> 3)) {
++		Dprintk("Imbalance too trivial\n");
++		goto not_worth_the_effort;
++	}
 +
-+static void __user *user_buffer;	/* user area microcode data buffer */
-+static unsigned int user_buffer_size;	/* it's size */
-+				
-+static int microcode_open (struct inode *unused1, struct file *unused2)
-+{
-+	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-+}
++tryanotherirq:
++	/* if we select an IRQ to move that can't go where we want, then
++	 * see if there is another one to try.
++	 */
++	move_this_load = 0;
++	selected_irq = -1;
++	for (j = 0; j < NR_IRQS; j++) {
++		/* Is this an active IRQ? */
++		if (!irq_desc[j].action)
++			continue;
++		if (imbalance <= IRQ_DELTA(max_loaded,j))
++			continue;
++		/* Try to find the IRQ that is closest to the imbalance
++		 * without going over.
++		 */
++		if (move_this_load < IRQ_DELTA(max_loaded,j)) {
++			move_this_load = IRQ_DELTA(max_loaded,j);
++			selected_irq = j;
++		}
++	}
++	if (selected_irq == -1) {
++		goto tryanothercpu;
++	}
 +
++	imbalance = move_this_load;
++	
++	/* For the physical_balance case, we accumulated both load
++	 * values in one of the siblings' cpu_irq[], so that we can
++	 * use the same code for physical and logical processors
++	 * as much as possible.
++	 *
++	 * NOTE: the cpu_irq[] array holds the sum of the load for
++	 * sibling A and sibling B in the slot for the lowest numbered
++	 * sibling (A), _AND_ the load for sibling B in the slot for
++	 * the higher numbered sibling.
++	 *
++	 * We seek the least loaded sibling by making the comparison
++	 * (A+B)/2 vs B
++	 */
++	load = CPU_IRQ(min_loaded) >> 1;
++	for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
++		if (load > CPU_IRQ(j)) {
++			/* This won't change cpu_sibling_map[min_loaded] */
++			load = CPU_IRQ(j);
++			min_loaded = j;
++		}
++	}
 +
-+static int do_microcode_update (void)
-+{
-+	int err;
-+	dom0_op_t op;
++	cpus_and(allowed_mask,
++		cpu_online_map,
++		balance_irq_affinity[selected_irq]);
++	target_cpu_mask = cpumask_of_cpu(min_loaded);
++	cpus_and(tmp, target_cpu_mask, allowed_mask);
 +
-+	err = sys_mlock((unsigned long)user_buffer, user_buffer_size);
-+	if (err != 0)
-+		return err;
++	if (!cpus_empty(tmp)) {
 +
-+	op.cmd = DOM0_MICROCODE;
-+	set_xen_guest_handle(op.u.microcode.data, user_buffer);
-+	op.u.microcode.length = user_buffer_size;
-+	err = HYPERVISOR_dom0_op(&op);
++		Dprintk("irq = %d moved to cpu = %d\n",
++				selected_irq, min_loaded);
++		/* mark for change destination */
++		set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
 +
-+	(void)sys_munlock((unsigned long)user_buffer, user_buffer_size);
++		/* Since we made a change, come back sooner to 
++		 * check for more variation.
++		 */
++		balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++			balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);	
++		return;
++	}
++	goto tryanotherirq;
 +
-+	return err;
++not_worth_the_effort:
++	/*
++	 * if we did not find an IRQ to move, then adjust the time interval
++	 * upward
++	 */
++	balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
++		balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);	
++	Dprintk("IRQ worth rotating not found\n");
++	return;
 +}
 +
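To see why the sibling loop in do_irq_balance compares CPU_IRQ(min_loaded) >> 1 against CPU_IRQ(j), plug in hypothetical numbers: if sibling A handled 400 interrupts and sibling B handled 200, the accounting described above yields CPU_IRQ(A) = 400 + 200 = 600 and CPU_IRQ(B) = 200. The test then compares 600 >> 1 = 300 with 200 and moves min_loaded to B, the sibling with the smaller real load; with equal loads of 300 each, 600 >> 1 = 300 is not greater than 300 and min_loaded stays put.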
-+static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
++static int balanced_irq(void *unused)
 +{
-+	ssize_t ret;
++	int i;
++	unsigned long prev_balance_time = jiffies;
++	long time_remaining = balanced_irq_interval;
 +
-+	if (len < DEFAULT_UCODE_TOTALSIZE) {
-+		printk(KERN_ERR "microcode: not enough data\n"); 
-+		return -EINVAL;
++	daemonize("kirqd");
++	
++	/* push everything to CPU 0 to give us a starting point.  */
++	for (i = 0 ; i < NR_IRQS ; i++) {
++		irq_desc[i].pending_mask = cpumask_of_cpu(0);
++		set_pending_irq(i, cpumask_of_cpu(0));
 +	}
 +
-+	if ((len >> PAGE_SHIFT) > num_physpages) {
-+		printk(KERN_ERR "microcode: too much data (max %ld pages)\n", num_physpages);
-+		return -EINVAL;
++	for ( ; ; ) {
++		time_remaining = schedule_timeout_interruptible(time_remaining);
++		try_to_freeze();
++		if (time_after(jiffies,
++				prev_balance_time+balanced_irq_interval)) {
++			preempt_disable();
++			do_irq_balance();
++			prev_balance_time = jiffies;
++			time_remaining = balanced_irq_interval;
++			preempt_enable();
++		}
 +	}
-+
-+	mutex_lock(&microcode_mutex);
-+
-+	user_buffer = (void __user *) buf;
-+	user_buffer_size = (int) len;
-+
-+	ret = do_microcode_update();
-+	if (!ret)
-+		ret = (ssize_t)len;
-+
-+	mutex_unlock(&microcode_mutex);
-+
-+	return ret;
++	return 0;
 +}
 +
-+static struct file_operations microcode_fops = {
-+	.owner		= THIS_MODULE,
-+	.write		= microcode_write,
-+	.open		= microcode_open,
-+};
-+
-+static struct miscdevice microcode_dev = {
-+	.minor		= MICROCODE_MINOR,
-+	.name		= "microcode",
-+	.fops		= &microcode_fops,
-+};
-+
-+static int __init microcode_init (void)
++static int __init balanced_irq_init(void)
 +{
-+	int error;
++	int i;
++	struct cpuinfo_x86 *c;
++	cpumask_t tmp;
 +
-+	error = misc_register(&microcode_dev);
-+	if (error) {
-+		printk(KERN_ERR
-+			"microcode: can't misc_register on minor=%d\n",
-+			MICROCODE_MINOR);
-+		return error;
++	cpus_shift_right(tmp, cpu_online_map, 2);
++	c = &boot_cpu_data;
++	/* When not overridden on the command line, ask the subarchitecture. */
++	if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
++		irqbalance_disabled = NO_BALANCE_IRQ;
++	if (irqbalance_disabled)
++		return 0;
++	
++	/* Disable irqbalance completely if there is only one processor online. */
++	if (num_online_cpus() < 2) {
++		irqbalance_disabled = 1;
++		return 0;
 +	}
++	/*
++	 * Enable physical balance only if more than 1 physical processor
++	 * is present
++	 */
++	if (smp_num_siblings > 1 && !cpus_empty(tmp))
++		physical_balance = 1;
 +
-+	printk(KERN_INFO 
-+		"IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran at veritas.com>\n");
++	for_each_online_cpu(i) {
++		irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++		irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
++			printk(KERN_ERR "balanced_irq_init: out of memory\n");
++			goto failed;
++		}
++		memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
++		memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
++	}
++	
++	printk(KERN_INFO "Starting balanced_irq\n");
++	if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0)
++		return 0;
++	else
++		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq\n");
++failed:
++	for_each_possible_cpu(i) {
++		kfree(irq_cpu_data[i].irq_delta);
++		irq_cpu_data[i].irq_delta = NULL;
++		kfree(irq_cpu_data[i].last_irq);
++		irq_cpu_data[i].last_irq = NULL;
++	}
 +	return 0;
 +}
 +
-+static void __exit microcode_exit (void)
++int __init irqbalance_disable(char *str)
 +{
-+	misc_deregister(&microcode_dev);
++	irqbalance_disabled = 1;
++	return 1;
 +}
 +
-+module_init(microcode_init)
-+module_exit(microcode_exit)
-+MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/mpparse-xen.c linux-2.6.18-xen/arch/i386/kernel/mpparse-xen.c
---- linux-2.6.18.3/arch/i386/kernel/mpparse-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/mpparse-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,1185 @@
-+/*
-+ *	Intel Multiprocessor Specification 1.1 and 1.4
-+ *	compliant MP-table parsing routines.
-+ *
-+ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
-+ *	(c) 1998, 1999, 2000 Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Fixes
-+ *		Erich Boleyn	:	MP v1.4 and additional changes.
-+ *		Alan Cox	:	Added EBDA scanning
-+ *		Ingo Molnar	:	various cleanups and rewrites
-+ *		Maciej W. Rozycki:	Bits for default MP configurations
-+ *		Paul Diefenbaugh:	Added full ACPI support
-+ */
++__setup("noirqbalance", irqbalance_disable);
 +
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/acpi.h>
-+#include <linux/delay.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/bitops.h>
++late_initcall(balanced_irq_init);
++#endif /* CONFIG_IRQBALANCE */
++#endif /* CONFIG_SMP */
++#endif /* !CONFIG_XEN */
 +
-+#include <asm/smp.h>
-+#include <asm/acpi.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/io_apic.h>
++#ifndef CONFIG_SMP
++void fastcall send_IPI_self(int vector)
++{
++#ifndef CONFIG_XEN
++	unsigned int cfg;
 +
-+#include <mach_apic.h>
-+#include <mach_mpparse.h>
-+#include <bios_ebda.h>
++	/*
++	 * Wait for idle.
++	 */
++	apic_wait_icr_idle();
++	cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
++	/*
++	 * Send the IPI. The write to APIC_ICR fires this off.
++	 */
++	apic_write_around(APIC_ICR, cfg);
++#endif
++}
++#endif /* !CONFIG_SMP */
 +
-+/* Have we found an MP table */
-+int smp_found_config;
-+unsigned int __initdata maxcpus = NR_CPUS;
 +
 +/*
-+ * Various Linux-internal data structures created from the
-+ * MP-table.
++ * Support for broken MP BIOSes: enables hand-redirection of PIRQ0-7 to
++ * specific CPU-side IRQs.
 + */
-+int apic_version [MAX_APICS];
-+int mp_bus_id_to_type [MAX_MP_BUSSES];
-+int mp_bus_id_to_node [MAX_MP_BUSSES];
-+int mp_bus_id_to_local [MAX_MP_BUSSES];
-+int quad_local_to_mp_bus_id [NR_CPUS/4][4];
-+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-+static int mp_current_pci_id;
 +
-+/* I/O APIC entries */
-+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
-+
-+/* # of MP IRQ source entries */
-+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-+
-+/* MP IRQ source entries */
-+int mp_irq_entries;
++#define MAX_PIRQS 8
++static int pirq_entries [MAX_PIRQS];
++static int pirqs_enabled;
++int skip_ioapic_setup;
 +
-+int nr_ioapics;
++static int __init ioapic_setup(char *str)
++{
++	skip_ioapic_setup = 1;
++	return 1;
++}
 +
-+int pic_mode;
-+unsigned long mp_lapic_addr;
++__setup("noapic", ioapic_setup);
 +
-+unsigned int def_to_bigsmp = 0;
++static int __init ioapic_pirq_setup(char *str)
++{
++	int i, max;
++	int ints[MAX_PIRQS+1];
 +
-+/* Processor that is doing the boot up */
-+unsigned int boot_cpu_physical_apicid = -1U;
-+/* Internal processor count */
-+static unsigned int __devinitdata num_processors;
++	get_options(str, ARRAY_SIZE(ints), ints);
 +
-+/* Bitmask of physically existing CPUs */
-+physid_mask_t phys_cpu_present_map;
++	for (i = 0; i < MAX_PIRQS; i++)
++		pirq_entries[i] = -1;
 +
-+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++	pirqs_enabled = 1;
++	apic_printk(APIC_VERBOSE, KERN_INFO
++			"PIRQ redirection, working around broken MP-BIOS.\n");
++	max = MAX_PIRQS;
++	if (ints[0] < MAX_PIRQS)
++		max = ints[0];
 +
-+/*
-+ * Intel MP BIOS table parsing routines:
-+ */
++	for (i = 0; i < max; i++) {
++		apic_printk(APIC_VERBOSE, KERN_DEBUG
++				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
++		/*
++		 * PIRQs are mapped upside down, usually.
++		 */
++		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
++	}
++	return 1;
++}
 +
++__setup("pirq=", ioapic_pirq_setup);
 +
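A worked example with hypothetical values: booting with pirq=10,11 makes get_options() return ints[0] = 2, ints[1] = 10, ints[2] = 11, so the loop sets pirq_entries[7] = 10 and pirq_entries[6] = 11. In other words PIRQ7 is redirected to IRQ 10 and PIRQ6 to IRQ 11, which is the upside-down mapping the comment in the loop refers to.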
 +/*
-+ * Checksum an MP configuration block.
++ * Find the IRQ entry number of a certain pin.
 + */
-+
-+static int __init mpf_checksum(unsigned char *mp, int len)
++static int find_irq_entry(int apic, int pin, int type)
 +{
-+	int sum = 0;
++	int i;
 +
-+	while (len--)
-+		sum += *mp++;
++	for (i = 0; i < mp_irq_entries; i++)
++		if (mp_irqs[i].mpc_irqtype == type &&
++		    (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
++		     mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
++		    mp_irqs[i].mpc_dstirq == pin)
++			return i;
 +
-+	return sum & 0xFF;
++	return -1;
 +}
 +
 +/*
-+ * Have to match translation table entries to main table entries by counter
-+ * hence the mpc_record variable .... can't see a less disgusting way of
-+ * doing this ....
++ * Find the pin to which IRQ[irq] (ISA) is connected
 + */
++static int __init find_isa_irq_pin(int irq, int type)
++{
++	int i;
 +
-+static int mpc_record; 
-+static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
++	for (i = 0; i < mp_irq_entries; i++) {
++		int lbus = mp_irqs[i].mpc_srcbus;
 +
-+#ifndef CONFIG_XEN
-+static void __devinit MP_processor_info (struct mpc_config_processor *m)
-+{
-+ 	int ver, apicid;
-+	physid_mask_t phys_cpu;
-+ 	
-+	if (!(m->mpc_cpuflag & CPU_ENABLED))
-+		return;
-+
-+	apicid = mpc_apic_id(m, translation_table[mpc_record]);
-+
-+	if (m->mpc_featureflag&(1<<0))
-+		Dprintk("    Floating point unit present.\n");
-+	if (m->mpc_featureflag&(1<<7))
-+		Dprintk("    Machine Exception supported.\n");
-+	if (m->mpc_featureflag&(1<<8))
-+		Dprintk("    64 bit compare & exchange supported.\n");
-+	if (m->mpc_featureflag&(1<<9))
-+		Dprintk("    Internal APIC present.\n");
-+	if (m->mpc_featureflag&(1<<11))
-+		Dprintk("    SEP present.\n");
-+	if (m->mpc_featureflag&(1<<12))
-+		Dprintk("    MTRR  present.\n");
-+	if (m->mpc_featureflag&(1<<13))
-+		Dprintk("    PGE  present.\n");
-+	if (m->mpc_featureflag&(1<<14))
-+		Dprintk("    MCA  present.\n");
-+	if (m->mpc_featureflag&(1<<15))
-+		Dprintk("    CMOV  present.\n");
-+	if (m->mpc_featureflag&(1<<16))
-+		Dprintk("    PAT  present.\n");
-+	if (m->mpc_featureflag&(1<<17))
-+		Dprintk("    PSE  present.\n");
-+	if (m->mpc_featureflag&(1<<18))
-+		Dprintk("    PSN  present.\n");
-+	if (m->mpc_featureflag&(1<<19))
-+		Dprintk("    Cache Line Flush Instruction present.\n");
-+	/* 20 Reserved */
-+	if (m->mpc_featureflag&(1<<21))
-+		Dprintk("    Debug Trace and EMON Store present.\n");
-+	if (m->mpc_featureflag&(1<<22))
-+		Dprintk("    ACPI Thermal Throttle Registers  present.\n");
-+	if (m->mpc_featureflag&(1<<23))
-+		Dprintk("    MMX  present.\n");
-+	if (m->mpc_featureflag&(1<<24))
-+		Dprintk("    FXSR  present.\n");
-+	if (m->mpc_featureflag&(1<<25))
-+		Dprintk("    XMM  present.\n");
-+	if (m->mpc_featureflag&(1<<26))
-+		Dprintk("    Willamette New Instructions  present.\n");
-+	if (m->mpc_featureflag&(1<<27))
-+		Dprintk("    Self Snoop  present.\n");
-+	if (m->mpc_featureflag&(1<<28))
-+		Dprintk("    HT  present.\n");
-+	if (m->mpc_featureflag&(1<<29))
-+		Dprintk("    Thermal Monitor present.\n");
-+	/* 30, 31 Reserved */
-+
-+
-+	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+		Dprintk("    Bootup CPU\n");
-+		boot_cpu_physical_apicid = m->mpc_apicid;
-+	}
-+
-+	ver = m->mpc_apicver;
-+
-+	/*
-+	 * Validate version
-+	 */
-+	if (ver == 0x0) {
-+		printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
-+				"fixing up to 0x10. (tell your hw vendor)\n",
-+				m->mpc_apicid);
-+		ver = 0x10;
-+	}
-+	apic_version[m->mpc_apicid] = ver;
-+
-+	phys_cpu = apicid_to_cpu_present(apicid);
-+	physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
-+
-+	if (num_processors >= NR_CPUS) {
-+		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-+			"  Processor ignored.\n", NR_CPUS);
-+		return;
-+	}
-+
-+	if (num_processors >= maxcpus) {
-+		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
-+			" Processor ignored.\n", maxcpus);
-+		return;
-+	}
-+
-+	cpu_set(num_processors, cpu_possible_map);
-+	num_processors++;
-+
-+	/*
-+	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
-+	 * but we need to work other dependencies like SMP_SUSPEND etc
-+	 * before this can be done without some confusion.
-+	 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
-+	 *       - Ashok Raj <ashok.raj at intel.com>
-+	 */
-+	if (num_processors > 8) {
-+		switch (boot_cpu_data.x86_vendor) {
-+		case X86_VENDOR_INTEL:
-+			if (!APIC_XAPIC(ver)) {
-+				def_to_bigsmp = 0;
-+				break;
-+			}
-+			/* If P4 and above fall through */
-+		case X86_VENDOR_AMD:
-+			def_to_bigsmp = 1;
-+		}
++		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_MCA
++		    ) &&
++		    (mp_irqs[i].mpc_irqtype == type) &&
++		    (mp_irqs[i].mpc_srcbusirq == irq))
++
++			return mp_irqs[i].mpc_dstirq;
 +	}
-+	bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
-+}
-+#else
-+void __init MP_processor_info (struct mpc_config_processor *m)
-+{
-+	num_processors++;
++	return -1;
 +}
-+#endif /* CONFIG_XEN */
 +
-+static void __init MP_bus_info (struct mpc_config_bus *m)
++static int __init find_isa_irq_apic(int irq, int type)
 +{
-+	char str[7];
-+
-+	memcpy(str, m->mpc_bustype, 6);
-+	str[6] = 0;
++	int i;
 +
-+	mpc_oem_bus_info(m, str, translation_table[mpc_record]);
++	for (i = 0; i < mp_irq_entries; i++) {
++		int lbus = mp_irqs[i].mpc_srcbus;
 +
-+	if (m->mpc_busid >= MAX_MP_BUSSES) {
-+		printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
-+			" is too large, max. supported is %d\n",
-+			m->mpc_busid, str, MAX_MP_BUSSES - 1);
-+		return;
++		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_MCA
++		    ) &&
++		    (mp_irqs[i].mpc_irqtype == type) &&
++		    (mp_irqs[i].mpc_srcbusirq == irq))
++			break;
 +	}
-+
-+	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
-+	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
-+	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
-+		mpc_oem_pci_bus(m, translation_table[mpc_record]);
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
-+		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
-+		mp_current_pci_id++;
-+	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
-+	} else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
-+	} else {
-+		printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
++	if (i < mp_irq_entries) {
++		int apic;
++		for(apic = 0; apic < nr_ioapics; apic++) {
++			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
++				return apic;
++		}
 +	}
++
++	return -1;
 +}
 +
-+static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++/*
++ * Find a specific PCI IRQ entry.
++ * Not an __init, possibly needed by modules
++ */
++static int pin_2_irq(int idx, int apic, int pin);
++
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
 +{
-+	if (!(m->mpc_flags & MPC_APIC_USABLE))
-+		return;
++	int apic, i, best_guess = -1;
 +
-+	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
-+		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
-+	if (nr_ioapics >= MAX_IO_APICS) {
-+		printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
-+			MAX_IO_APICS, nr_ioapics);
-+		panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
-+	}
-+	if (!m->mpc_apicaddr) {
-+		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
-+			" found in MP table, skipping!\n");
-+		return;
++	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
++		"slot:%d, pin:%d.\n", bus, slot, pin);
++	if (mp_bus_id_to_pci_bus[bus] == -1) {
++		printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
++		return -1;
 +	}
-+	mp_ioapics[nr_ioapics] = *m;
-+	nr_ioapics++;
-+}
++	for (i = 0; i < mp_irq_entries; i++) {
++		int lbus = mp_irqs[i].mpc_srcbus;
 +
-+static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
-+{
-+	mp_irqs [mp_irq_entries] = *m;
-+	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
-+		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
-+			m->mpc_irqtype, m->mpc_irqflag & 3,
-+			(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
-+			m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
-+	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+		panic("Max # of irq sources exceeded!!\n");
-+}
++		for (apic = 0; apic < nr_ioapics; apic++)
++			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
++			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
++				break;
 +
-+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
-+{
-+	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
-+		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
-+			m->mpc_irqtype, m->mpc_irqflag & 3,
-+			(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
-+			m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
-+	/*
-+	 * Well it seems all SMP boards in existence
-+	 * use ExtINT/LVT1 == LINT0 and
-+	 * NMI/LVT2 == LINT1 - the following check
-+	 * will show us if this assumptions is false.
-+	 * Until then we do not have to add baggage.
-+	 */
-+	if ((m->mpc_irqtype == mp_ExtINT) &&
-+		(m->mpc_destapiclint != 0))
-+			BUG();
-+	if ((m->mpc_irqtype == mp_NMI) &&
-+		(m->mpc_destapiclint != 1))
-+			BUG();
-+}
++		if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++		    !mp_irqs[i].mpc_irqtype &&
++		    (bus == lbus) &&
++		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
++			int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
 +
-+#ifdef CONFIG_X86_NUMAQ
-+static void __init MP_translation_info (struct mpc_config_translation *m)
-+{
-+	printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
++			if (!(apic || IO_APIC_IRQ(irq)))
++				continue;
 +
-+	if (mpc_record >= MAX_MPC_ENTRY) 
-+		printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
-+	else
-+		translation_table[mpc_record] = m; /* stash this for later */
-+	if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
-+		node_set_online(m->trans_quad);
++			if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
++				return irq;
++			/*
++			 * Use the first all-but-pin matching entry as a
++			 * best-guess fuzzy result for broken mptables.
++			 */
++			if (best_guess < 0)
++				best_guess = irq;
++		}
++	}
++	return best_guess;
 +}
++EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
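The slot and pin arithmetic in the lookup above follows the MP-table encoding
for PCI source-bus IRQs: the device (slot) number lives in bits 6..2 of
mpc_srcbusirq and the interrupt pin (INTA#..INTD# as 0..3) in bits 1..0. A
minimal standalone sketch of that packing; the helper name is illustrative and
not part of the patch:

/* Sketch only: the PCI mpc_srcbusirq encoding assumed by
 * IO_APIC_get_PCI_irq_vector() above. */
#include <stdio.h>

static unsigned char pack_srcbusirq(int slot, int pin)
{
	return (unsigned char)(((slot & 0x1f) << 2) | (pin & 3));
}

int main(void)
{
	unsigned char v = pack_srcbusirq(3, 0);	/* device 3, INTA# */

	printf("srcbusirq=0x%02x -> slot %d, pin %d\n",
	       v, (v >> 2) & 0x1f, v & 3);
	return 0;
}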
 +
 +/*
-+ * Read/parse the MPC oem tables
++ * This function is currently only a helper for the i386 SMP boot process,
++ * where we need to reprogram the ioredtbls to cater for the CPUs which
++ * have come online, so the mask should in all cases simply be TARGET_CPUS.
 + */
-+
-+static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
-+	unsigned short oemsize)
++#ifdef CONFIG_SMP
++#ifndef CONFIG_XEN
++void __init setup_ioapic_dest(void)
 +{
-+	int count = sizeof (*oemtable); /* the header size */
-+	unsigned char *oemptr = ((unsigned char *)oemtable)+count;
-+	
-+	mpc_record = 0;
-+	printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
-+	if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
-+	{
-+		printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
-+			oemtable->oem_signature[0],
-+			oemtable->oem_signature[1],
-+			oemtable->oem_signature[2],
-+			oemtable->oem_signature[3]);
-+		return;
-+	}
-+	if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
-+	{
-+		printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
++	int pin, ioapic, irq, irq_entry;
++
++	if (skip_ioapic_setup == 1)
 +		return;
-+	}
-+	while (count < oemtable->oem_length) {
-+		switch (*oemptr) {
-+			case MP_TRANSLATION:
-+			{
-+				struct mpc_config_translation *m=
-+					(struct mpc_config_translation *)oemptr;
-+				MP_translation_info(m);
-+				oemptr += sizeof(*m);
-+				count += sizeof(*m);
-+				++mpc_record;
-+				break;
-+			}
-+			default:
-+			{
-+				printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
-+				return;
-+			}
++
++	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
++		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
++			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
++			if (irq_entry == -1)
++				continue;
++			irq = pin_2_irq(irq_entry, ioapic, pin);
++			set_ioapic_affinity_irq(irq, TARGET_CPUS);
 +		}
-+       }
-+}
 +
-+static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
-+		char *productid)
-+{
-+	if (strncmp(oem, "IBM NUMA", 8))
-+		printk("Warning!  May not be a NUMA-Q system!\n");
-+	if (mpc->mpc_oemptr)
-+		smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
-+				mpc->mpc_oemsize);
++	}
 +}
-+#endif	/* CONFIG_X86_NUMAQ */
++#endif /* !CONFIG_XEN */
++#endif
 +
 +/*
-+ * Read/parse the MPC
++ * EISA Edge/Level control register, ELCR
 + */
-+
-+static int __init smp_read_mpc(struct mp_config_table *mpc)
++static int EISA_ELCR(unsigned int irq)
 +{
-+	char str[16];
-+	char oem[10];
-+	int count=sizeof(*mpc);
-+	unsigned char *mpt=((unsigned char *)mpc)+count;
-+
-+	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
-+		printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
-+			*(u32 *)mpc->mpc_signature);
-+		return 0;
-+	}
-+	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
-+		printk(KERN_ERR "SMP mptable: checksum error!\n");
-+		return 0;
-+	}
-+	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
-+		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
-+			mpc->mpc_spec);
-+		return 0;
-+	}
-+	if (!mpc->mpc_lapic) {
-+		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
-+		return 0;
++	if (irq < 16) {
++		unsigned int port = 0x4d0 + (irq >> 3);
++		return (inb(port) >> (irq & 7)) & 1;
 +	}
-+	memcpy(oem,mpc->mpc_oem,8);
-+	oem[8]=0;
-+	printk(KERN_INFO "OEM ID: %s ",oem);
++	apic_printk(APIC_VERBOSE, KERN_INFO
++			"Broken MPtable reports ISA irq %d\n", irq);
++	return 0;
++}
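The ELCR referenced above is a pair of chipset registers with one bit per ISA
IRQ: port 0x4d0 covers IRQ0-7 and port 0x4d1 covers IRQ8-15, a set bit meaning
level triggered; 0x4d0 + (irq >> 3) selects the port and irq & 7 the bit. A
hypothetical side-effect-free version of the lookup, taking the two port
values as parameters instead of calling inb() itself:

/* Sketch only: the ELCR bit lookup performed by EISA_ELCR() above. */
static int elcr_is_level(unsigned char elcr_4d0, unsigned char elcr_4d1,
			 unsigned int irq)
{
	unsigned char reg = (irq < 8) ? elcr_4d0 : elcr_4d1;

	return (reg >> (irq & 7)) & 1;	/* 1 = level, 0 = edge */
}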
 +
-+	memcpy(str,mpc->mpc_productid,12);
-+	str[12]=0;
-+	printk("Product ID: %s ",str);
++/* EISA interrupts are always polarity zero and can be edge or level
++ * trigger depending on the ELCR value.  If an interrupt is listed as
++ * EISA conforming in the MP table, that means its trigger type must
++ * be read in from the ELCR */
 +
-+	mps_oem_check(mpc, oem, str);
++#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
++#define default_EISA_polarity(idx)	(0)
 +
-+	printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
++/* ISA interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
 +
-+	/* 
-+	 * Save the local APIC address (it might be non-default) -- but only
-+	 * if we're not using ACPI.
-+	 */
-+	if (!acpi_lapic)
-+		mp_lapic_addr = mpc->mpc_lapic;
++#define default_ISA_trigger(idx)	(0)
++#define default_ISA_polarity(idx)	(0)
 +
-+	/*
-+	 *	Now process the configuration blocks.
-+	 */
-+	mpc_record = 0;
-+	while (count < mpc->mpc_length) {
-+		switch(*mpt) {
-+			case MP_PROCESSOR:
-+			{
-+				struct mpc_config_processor *m=
-+					(struct mpc_config_processor *)mpt;
-+				/* ACPI may have already provided this data */
-+				if (!acpi_lapic)
-+					MP_processor_info(m);
-+				mpt += sizeof(*m);
-+				count += sizeof(*m);
-+				break;
-+			}
-+			case MP_BUS:
-+			{
-+				struct mpc_config_bus *m=
-+					(struct mpc_config_bus *)mpt;
-+				MP_bus_info(m);
-+				mpt += sizeof(*m);
-+				count += sizeof(*m);
-+				break;
-+			}
-+			case MP_IOAPIC:
-+			{
-+				struct mpc_config_ioapic *m=
-+					(struct mpc_config_ioapic *)mpt;
-+				MP_ioapic_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+			case MP_INTSRC:
-+			{
-+				struct mpc_config_intsrc *m=
-+					(struct mpc_config_intsrc *)mpt;
++/* PCI interrupts are always polarity one level triggered,
++ * when listed as conforming in the MP table. */
 +
-+				MP_intsrc_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+			case MP_LINTSRC:
-+			{
-+				struct mpc_config_lintsrc *m=
-+					(struct mpc_config_lintsrc *)mpt;
-+				MP_lintsrc_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+			default:
-+			{
-+				count = mpc->mpc_length;
-+				break;
-+			}
-+		}
-+		++mpc_record;
-+	}
-+	clustered_apic_check();
-+	if (!num_processors)
-+		printk(KERN_ERR "SMP mptable: no processors registered!\n");
-+	return num_processors;
-+}
++#define default_PCI_trigger(idx)	(1)
++#define default_PCI_polarity(idx)	(1)
 +
-+static int __init ELCR_trigger(unsigned int irq)
-+{
-+	unsigned int port;
++/* MCA interrupts are always polarity zero level triggered,
++ * when listed as conforming in the MP table. */
 +
-+	port = 0x4d0 + (irq >> 3);
-+	return (inb(port) >> (irq & 7)) & 1;
-+}
++#define default_MCA_trigger(idx)	(1)
++#define default_MCA_polarity(idx)	(0)
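Taken together, the default_* macros above encode the per-bus conventions for
interrupts that the MP table lists as conforming; summarized for reference
(0 = active high or edge, 1 = active low or level):

/*
 *	bus	polarity	trigger
 *	ISA	high (0)	edge (0)
 *	EISA	high (0)	read from ELCR
 *	PCI	low  (1)	level (1)
 *	MCA	high (0)	level (1)
 */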
 +
-+static void __init construct_default_ioirq_mptable(int mpc_default_type)
++static int __init MPBIOS_polarity(int idx)
 +{
-+	struct mpc_config_intsrc intsrc;
-+	int i;
-+	int ELCR_fallback = 0;
-+
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqflag = 0;			/* conforming */
-+	intsrc.mpc_srcbus = 0;
-+	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
-+
-+	intsrc.mpc_irqtype = mp_INT;
++	int bus = mp_irqs[idx].mpc_srcbus;
++	int polarity;
 +
 +	/*
-+	 *  If true, we have an ISA/PCI system with no IRQ entries
-+	 *  in the MP table. To prevent the PCI interrupts from being set up
-+	 *  incorrectly, we try to use the ELCR. The sanity check to see if
-+	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
-+	 *  never be level sensitive, so we simply see if the ELCR agrees.
-+	 *  If it does, we assume it's valid.
++	 * Determine IRQ line polarity (high active or low active):
 +	 */
-+	if (mpc_default_type == 5) {
-+		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
-+
-+		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
-+			printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
-+		else {
-+			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
-+			ELCR_fallback = 1;
++	switch (mp_irqs[idx].mpc_irqflag & 3)
++	{
++		case 0: /* conforms, i.e. bus-type dependent polarity */
++		{
++			switch (mp_bus_id_to_type[bus])
++			{
++				case MP_BUS_ISA: /* ISA pin */
++				{
++					polarity = default_ISA_polarity(idx);
++					break;
++				}
++				case MP_BUS_EISA: /* EISA pin */
++				{
++					polarity = default_EISA_polarity(idx);
++					break;
++				}
++				case MP_BUS_PCI: /* PCI pin */
++				{
++					polarity = default_PCI_polarity(idx);
++					break;
++				}
++				case MP_BUS_MCA: /* MCA pin */
++				{
++					polarity = default_MCA_polarity(idx);
++					break;
++				}
++				default:
++				{
++					printk(KERN_WARNING "broken BIOS!!\n");
++					polarity = 1;
++					break;
++				}
++			}
++			break;
 +		}
-+	}
-+
-+	for (i = 0; i < 16; i++) {
-+		switch (mpc_default_type) {
-+		case 2:
-+			if (i == 0 || i == 13)
-+				continue;	/* IRQ0 & IRQ13 not connected */
-+			/* fall through */
-+		default:
-+			if (i == 2)
-+				continue;	/* IRQ2 is never connected */
++		case 1: /* high active */
++		{
++			polarity = 0;
++			break;
 +		}
-+
-+		if (ELCR_fallback) {
-+			/*
-+			 *  If the ELCR indicates a level-sensitive interrupt, we
-+			 *  copy that information over to the MP table in the
-+			 *  irqflag field (level sensitive, active high polarity).
-+			 */
-+			if (ELCR_trigger(i))
-+				intsrc.mpc_irqflag = 13;
-+			else
-+				intsrc.mpc_irqflag = 0;
++		case 2: /* reserved */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			polarity = 1;
++			break;
++		}
++		case 3: /* low active */
++		{
++			polarity = 1;
++			break;
++		}
++		default: /* invalid */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			polarity = 1;
++			break;
 +		}
-+
-+		intsrc.mpc_srcbusirq = i;
-+		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
-+		MP_intsrc_info(&intsrc);
 +	}
-+
-+	intsrc.mpc_irqtype = mp_ExtINT;
-+	intsrc.mpc_srcbusirq = 0;
-+	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
-+	MP_intsrc_info(&intsrc);
++	return polarity;
 +}
 +
-+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
++static int MPBIOS_trigger(int idx)
 +{
-+	struct mpc_config_processor processor;
-+	struct mpc_config_bus bus;
-+	struct mpc_config_ioapic ioapic;
-+	struct mpc_config_lintsrc lintsrc;
-+	int linttypes[2] = { mp_ExtINT, mp_NMI };
-+	int i;
-+
-+	/*
-+	 * local APIC has default address
-+	 */
-+	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++	int bus = mp_irqs[idx].mpc_srcbus;
++	int trigger;
 +
 +	/*
-+	 * 2 CPUs, numbered 0 & 1.
++	 * Determine IRQ trigger mode (edge or level sensitive):
 +	 */
-+	processor.mpc_type = MP_PROCESSOR;
-+	/* Either an integrated APIC or a discrete 82489DX. */
-+	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+	processor.mpc_cpuflag = CPU_ENABLED;
-+	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
-+				   (boot_cpu_data.x86_model << 4) |
-+				   boot_cpu_data.x86_mask;
-+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+	processor.mpc_reserved[0] = 0;
-+	processor.mpc_reserved[1] = 0;
-+	for (i = 0; i < 2; i++) {
-+		processor.mpc_apicid = i;
-+		MP_processor_info(&processor);
-+	}
-+
-+	bus.mpc_type = MP_BUS;
-+	bus.mpc_busid = 0;
-+	switch (mpc_default_type) {
-+		default:
-+			printk("???\n");
-+			printk(KERN_ERR "Unknown standard configuration %d\n",
-+				mpc_default_type);
-+			/* fall through */
-+		case 1:
-+		case 5:
-+			memcpy(bus.mpc_bustype, "ISA   ", 6);
++	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
++	{
++		case 0: /* conforms, i.e. bus-type dependent */
++		{
++			switch (mp_bus_id_to_type[bus])
++			{
++				case MP_BUS_ISA: /* ISA pin */
++				{
++					trigger = default_ISA_trigger(idx);
++					break;
++				}
++				case MP_BUS_EISA: /* EISA pin */
++				{
++					trigger = default_EISA_trigger(idx);
++					break;
++				}
++				case MP_BUS_PCI: /* PCI pin */
++				{
++					trigger = default_PCI_trigger(idx);
++					break;
++				}
++				case MP_BUS_MCA: /* MCA pin */
++				{
++					trigger = default_MCA_trigger(idx);
++					break;
++				}
++				default:
++				{
++					printk(KERN_WARNING "broken BIOS!!\n");
++					trigger = 1;
++					break;
++				}
++			}
 +			break;
-+		case 2:
-+		case 6:
-+		case 3:
-+			memcpy(bus.mpc_bustype, "EISA  ", 6);
++		}
++		case 1: /* edge */
++		{
++			trigger = 0;
 +			break;
-+		case 4:
-+		case 7:
-+			memcpy(bus.mpc_bustype, "MCA   ", 6);
-+	}
-+	MP_bus_info(&bus);
-+	if (mpc_default_type > 4) {
-+		bus.mpc_busid = 1;
-+		memcpy(bus.mpc_bustype, "PCI   ", 6);
-+		MP_bus_info(&bus);
-+	}
-+
-+	ioapic.mpc_type = MP_IOAPIC;
-+	ioapic.mpc_apicid = 2;
-+	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+	ioapic.mpc_flags = MPC_APIC_USABLE;
-+	ioapic.mpc_apicaddr = 0xFEC00000;
-+	MP_ioapic_info(&ioapic);
-+
-+	/*
-+	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
-+	 */
-+	construct_default_ioirq_mptable(mpc_default_type);
-+
-+	lintsrc.mpc_type = MP_LINTSRC;
-+	lintsrc.mpc_irqflag = 0;		/* conforming */
-+	lintsrc.mpc_srcbusid = 0;
-+	lintsrc.mpc_srcbusirq = 0;
-+	lintsrc.mpc_destapic = MP_APIC_ALL;
-+	for (i = 0; i < 2; i++) {
-+		lintsrc.mpc_irqtype = linttypes[i];
-+		lintsrc.mpc_destapiclint = i;
-+		MP_lintsrc_info(&lintsrc);
++		}
++		case 2: /* reserved */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			trigger = 1;
++			break;
++		}
++		case 3: /* level */
++		{
++			trigger = 1;
++			break;
++		}
++		default: /* invalid */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			trigger = 0;
++			break;
++		}
 +	}
++	return trigger;
 +}
 +
-+static struct intel_mp_floating *mpf_found;
-+
-+/*
-+ * Scan the memory blocks for an SMP configuration block.
-+ */
-+void __init get_smp_config (void)
++static inline int irq_polarity(int idx)
 +{
-+	struct intel_mp_floating *mpf = mpf_found;
++	return MPBIOS_polarity(idx);
++}
 +
-+	/*
-+	 * ACPI supports both logical (e.g. Hyper-Threading) and physical 
-+	 * processors, where MPS only supports physical.
-+	 */
-+	if (acpi_lapic && acpi_ioapic) {
-+		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
-+		return;
-+	}
-+	else if (acpi_lapic)
-+		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
++static inline int irq_trigger(int idx)
++{
++	return MPBIOS_trigger(idx);
++}
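Both wrappers above decode the same MP-table irqflag word: bits 1..0 carry
the polarity field and bits 3..2 the trigger field, with the value 0 meaning
"conforms to the source bus" and 2 reserved. A minimal sketch of that
decoding; the helper name is illustrative, not from the patch:

/* Sketch only: the irqflag decoding used by MPBIOS_polarity() and
 * MPBIOS_trigger() above. */
static void decode_mp_irqflag(unsigned short irqflag,
			      int *polarity, int *trigger)
{
	*polarity = irqflag & 3;	/* 0=conform 1=high 2=resv 3=low */
	*trigger = (irqflag >> 2) & 3;	/* 0=conform 1=edge 2=resv 3=level */
}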
 +
-+	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
-+	if (mpf->mpf_feature2 & (1<<7)) {
-+		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
-+		pic_mode = 1;
-+	} else {
-+		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
-+		pic_mode = 0;
-+	}
++static int pin_2_irq(int idx, int apic, int pin)
++{
++	int irq, i;
++	int bus = mp_irqs[idx].mpc_srcbus;
 +
 +	/*
-+	 * Now see if we need to read further.
++	 * Debugging check; we are in big trouble if this message pops up!
 +	 */
-+	if (mpf->mpf_feature1 != 0) {
-+
-+		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
-+		construct_default_ISA_mptable(mpf->mpf_feature1);
-+
-+	} else if (mpf->mpf_physptr) {
++	if (mp_irqs[idx].mpc_dstirq != pin)
++		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
 +
-+		/*
-+		 * Read the physical hardware table.  Anything here will
-+		 * override the defaults.
-+		 */
-+		if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
-+			smp_found_config = 0;
-+			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
-+			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
-+			return;
++	switch (mp_bus_id_to_type[bus])
++	{
++		case MP_BUS_ISA: /* ISA pin */
++		case MP_BUS_EISA:
++		case MP_BUS_MCA:
++		{
++			irq = mp_irqs[idx].mpc_srcbusirq;
++			break;
 +		}
-+		/*
-+		 * If there are no explicit MP IRQ entries, then we are
-+		 * broken.  We set up most of the low 16 IO-APIC pins to
-+		 * ISA defaults and hope it will work.
-+		 */
-+		if (!mp_irq_entries) {
-+			struct mpc_config_bus bus;
-+
-+			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++		case MP_BUS_PCI: /* PCI pin */
++		{
++			/*
++			 * PCI IRQs are mapped in order
++			 */
++			i = irq = 0;
++			while (i < apic)
++				irq += nr_ioapic_registers[i++];
++			irq += pin;
 +
-+			bus.mpc_type = MP_BUS;
-+			bus.mpc_busid = 0;
-+			memcpy(bus.mpc_bustype, "ISA   ", 6);
-+			MP_bus_info(&bus);
++			/*
++			 * For MPS mode, so far only needed by ES7000 platform
++			 */
++			if (ioapic_renumber_irq)
++				irq = ioapic_renumber_irq(apic, irq);
 +
-+			construct_default_ioirq_mptable(0);
++			break;
 +		}
++		default:
++		{
++			printk(KERN_ERR "unknown bus type %d.\n", bus);
++			irq = 0;
++			break;
++		}
++	}
 +
-+	} else
-+		BUG();
-+
-+	printk(KERN_INFO "Processors: %d\n", num_processors);
 +	/*
-+	 * Only use the first configuration found.
++	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
 +	 */
++	if ((pin >= 16) && (pin <= 23)) {
++		if (pirq_entries[pin-16] != -1) {
++			if (!pirq_entries[pin-16]) {
++				apic_printk(APIC_VERBOSE, KERN_DEBUG
++						"disabling PIRQ%d\n", pin-16);
++			} else {
++				irq = pirq_entries[pin-16];
++				apic_printk(APIC_VERBOSE, KERN_DEBUG
++						"using PIRQ%d -> IRQ %d\n",
++						pin-16, irq);
++			}
++		}
++	}
++	return irq;
 +}
 +
-+static int __init smp_scan_config (unsigned long base, unsigned long length)
++static inline int IO_APIC_irq_trigger(int irq)
 +{
-+	unsigned long *bp = isa_bus_to_virt(base);
-+	struct intel_mp_floating *mpf;
-+
-+	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
-+	if (sizeof(*mpf) != 16)
-+		printk("Error: MPF size\n");
-+
-+	while (length > 0) {
-+		mpf = (struct intel_mp_floating *)bp;
-+		if ((*bp == SMP_MAGIC_IDENT) &&
-+			(mpf->mpf_length == 1) &&
-+			!mpf_checksum((unsigned char *)bp, 16) &&
-+			((mpf->mpf_specification == 1)
-+				|| (mpf->mpf_specification == 4)) ) {
-+
-+			smp_found_config = 1;
-+#ifndef CONFIG_XEN
-+			printk(KERN_INFO "found SMP MP-table at %08lx\n",
-+						virt_to_phys(mpf));
-+			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
-+			if (mpf->mpf_physptr) {
-+				/*
-+				 * We cannot access to MPC table to compute
-+				 * table size yet, as only few megabytes from
-+				 * the bottom is mapped now.
-+				 * PC-9800's MPC table places on the very last
-+				 * of physical memory; so that simply reserving
-+				 * PAGE_SIZE from mpg->mpf_physptr yields BUG()
-+				 * in reserve_bootmem.
-+				 */
-+				unsigned long size = PAGE_SIZE;
-+				unsigned long end = max_low_pfn * PAGE_SIZE;
-+				if (mpf->mpf_physptr + size > end)
-+					size = end - mpf->mpf_physptr;
-+				reserve_bootmem(mpf->mpf_physptr, size);
-+			}
-+#else
-+			printk(KERN_INFO "found SMP MP-table at %08lx\n",
-+				((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base);
-+#endif
++	int apic, idx, pin;
 +
-+			mpf_found = mpf;
-+			return 1;
++	for (apic = 0; apic < nr_ioapics; apic++) {
++		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++			idx = find_irq_entry(apic,pin,mp_INT);
++			if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
++				return irq_trigger(idx);
 +		}
-+		bp += 4;
-+		length -= 16;
 +	}
++	/*
++	 * nonexistent IRQs are edge default
++	 */
 +	return 0;
 +}
 +
-+void __init find_smp_config (void)
-+{
-+#ifndef CONFIG_XEN
-+	unsigned int address;
-+#endif
++/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
++static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
 +
-+	/*
-+	 * FIXME: Linux assumes you have 640K of base ram..
-+	 * this continues the error...
-+	 *
-+	 * 1) Scan the bottom 1K for a signature
-+	 * 2) Scan the top 1K of base RAM
-+	 * 3) Scan the 64K of bios
-+	 */
-+	if (smp_scan_config(0x0,0x400) ||
-+		smp_scan_config(639*0x400,0x400) ||
-+			smp_scan_config(0xF0000,0x10000))
-+		return;
-+	/*
-+	 * If it is an SMP machine we should know now, unless the
-+	 * configuration is in an EISA/MCA bus machine with an
-+	 * extended bios data area.
-+	 *
-+	 * there is a real-mode segmented pointer pointing to the
-+	 * 4K EBDA area at 0x40E, calculate and scan it here.
-+	 *
-+	 * NOTE! There are Linux loaders that will corrupt the EBDA
-+	 * area, and as such this kind of SMP config may be less
-+	 * trustworthy, simply because the SMP table may have been
-+	 * stomped on during early boot. These loaders are buggy and
-+	 * should be fixed.
-+	 *
-+	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
-+	 */
++static int __assign_irq_vector(int irq)
++{
++	struct physdev_irq irq_op;
++	int vector;
 +
-+#ifndef CONFIG_XEN
-+	address = get_bios_ebda();
-+	if (address)
-+		smp_scan_config(address, 0x400);
-+#endif
-+}
++	BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
 +
-+int es7000_plat;
++	if (irq_vector[irq] > 0)
++		return irq_vector[irq];
++	irq_op.irq = irq;
++	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
++		return -ENOSPC;
 +
-+/* --------------------------------------------------------------------------
-+                            ACPI-based MP Configuration
-+   -------------------------------------------------------------------------- */
++	vector = irq_op.vector;
++	irq_vector[irq] = vector;
 +
-+#ifdef CONFIG_ACPI
++	return vector;
++}
 +
-+void __init mp_register_lapic_address (
-+	u64			address)
++static int assign_irq_vector(int irq)
 +{
-+#ifndef CONFIG_XEN
-+	mp_lapic_addr = (unsigned long) address;
-+
-+	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
++	unsigned long flags;
++	int vector;
 +
-+	if (boot_cpu_physical_apicid == -1U)
-+		boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
++	spin_lock_irqsave(&vector_lock, flags);
++	vector = __assign_irq_vector(irq);
++	spin_unlock_irqrestore(&vector_lock, flags);
 +
-+	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
-+#endif
++	return vector;
 +}
++#ifndef CONFIG_XEN
++static struct irq_chip ioapic_chip;
 +
++#define IOAPIC_AUTO	-1
++#define IOAPIC_EDGE	0
++#define IOAPIC_LEVEL	1
 +
-+void __devinit mp_register_lapic (
-+	u8			id, 
-+	u8			enabled)
++static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 +{
-+	struct mpc_config_processor processor;
-+	int			boot_cpu = 0;
-+	
-+	if (MAX_APICS - id <= 0) {
-+		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
-+			id, MAX_APICS);
-+		return;
++	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++			trigger == IOAPIC_LEVEL)
++		set_irq_chip_and_handler_name(irq, &ioapic_chip,
++					 handle_fasteoi_irq, "fasteoi");
++	else {
++		irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
++		set_irq_chip_and_handler_name(irq, &ioapic_chip,
++					 handle_edge_irq, "edge");
 +	}
-+
-+	if (id == boot_cpu_physical_apicid)
-+		boot_cpu = 1;
-+
-+#ifndef CONFIG_XEN
-+	processor.mpc_type = MP_PROCESSOR;
-+	processor.mpc_apicid = id;
-+	processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
-+	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
-+	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
-+	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 
-+		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+	processor.mpc_reserved[0] = 0;
-+	processor.mpc_reserved[1] = 0;
++	set_intr_gate(vector, interrupt[irq]);
++}
++#else
++#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
 +#endif
 +
-+	MP_processor_info(&processor);
-+}
++static void __init setup_IO_APIC_irqs(void)
++{
++	struct IO_APIC_route_entry entry;
++	int apic, pin, idx, irq, first_notcon = 1, vector;
++	unsigned long flags;
 +
-+#ifdef	CONFIG_X86_IO_APIC
++	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 +
-+#define MP_ISA_BUS		0
-+#define MP_MAX_IOAPIC_PIN	127
++	for (apic = 0; apic < nr_ioapics; apic++) {
++	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
 +
-+static struct mp_ioapic_routing {
-+	int			apic_id;
-+	int			gsi_base;
-+	int			gsi_end;
-+	u32			pin_programmed[4];
-+} mp_ioapic_routing[MAX_IO_APICS];
++		/*
++		 * add it to the IO-APIC irq-routing table:
++		 */
++		memset(&entry,0,sizeof(entry));
 +
++		entry.delivery_mode = INT_DELIVERY_MODE;
++		entry.dest_mode = INT_DEST_MODE;
++		entry.mask = 0;				/* enable IRQ */
++		entry.dest.logical.logical_dest = 
++					cpu_mask_to_apicid(TARGET_CPUS);
 +
-+static int mp_find_ioapic (
-+	int			gsi)
-+{
-+	int			i = 0;
++		idx = find_irq_entry(apic,pin,mp_INT);
++		if (idx == -1) {
++			if (first_notcon) {
++				apic_printk(APIC_VERBOSE, KERN_DEBUG
++						" IO-APIC (apicid-pin) %d-%d",
++						mp_ioapics[apic].mpc_apicid,
++						pin);
++				first_notcon = 0;
++			} else
++				apic_printk(APIC_VERBOSE, ", %d-%d",
++					mp_ioapics[apic].mpc_apicid, pin);
++			continue;
++		}
 +
-+	/* Find the IOAPIC that manages this GSI. */
-+	for (i = 0; i < nr_ioapics; i++) {
-+		if ((gsi >= mp_ioapic_routing[i].gsi_base)
-+			&& (gsi <= mp_ioapic_routing[i].gsi_end))
-+			return i;
-+	}
++		entry.trigger = irq_trigger(idx);
++		entry.polarity = irq_polarity(idx);
 +
-+	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
++		if (irq_trigger(idx)) {
++			entry.trigger = 1;
++			entry.mask = 1;
++		}
 +
-+	return -1;
-+}
-+	
++		irq = pin_2_irq(idx, apic, pin);
++		/*
++		 * skip adding the timer int on secondary nodes, which causes
++		 * a small but painful rift in the time-space continuum
++		 */
++		if (multi_timer_check(apic, irq))
++			continue;
++		else
++			add_pin_to_irq(irq, apic, pin);
 +
-+void __init mp_register_ioapic (
-+	u8			id, 
-+	u32			address,
-+	u32			gsi_base)
-+{
-+	int			idx = 0;
-+	int			tmpid;
++		if (/*!apic &&*/ !IO_APIC_IRQ(irq))
++			continue;
 +
-+	if (nr_ioapics >= MAX_IO_APICS) {
-+		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
-+			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
-+		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++		if (IO_APIC_IRQ(irq)) {
++			vector = assign_irq_vector(irq);
++			entry.vector = vector;
++			ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++		
++			if (!apic && (irq < 16))
++				disable_8259A_irq(irq);
++		}
++		spin_lock_irqsave(&ioapic_lock, flags);
++		__ioapic_write_entry(apic, pin, entry);
++		set_native_irq_info(irq, TARGET_CPUS);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
 +	}
-+	if (!address) {
-+		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
-+			" found in MADT table, skipping!\n");
-+		return;
 +	}
 +
-+	idx = nr_ioapics++;
-+
-+	mp_ioapics[idx].mpc_type = MP_IOAPIC;
-+	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
-+	mp_ioapics[idx].mpc_apicaddr = address;
++	if (!first_notcon)
++		apic_printk(APIC_VERBOSE, " not connected.\n");
++}
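The entry structure filled in above corresponds to the 64-bit I/O APIC
redirection-table entry. Roughly, per the 82093AA datasheet (a summary for
orientation, not taken from the patch):

/*
 *   bits  0..7   vector           - entry.vector
 *   bits  8..10  delivery mode    - entry.delivery_mode
 *   bit  11      destination mode - entry.dest_mode (0=physical 1=logical)
 *   bit  12      delivery status  (read only)
 *   bit  13      polarity         - entry.polarity (0=high 1=low)
 *   bit  14      remote IRR       (read only, level mode)
 *   bit  15      trigger          - entry.trigger (0=edge 1=level)
 *   bit  16      mask             - entry.mask (1=IRQ disabled)
 *   bits 56..63  destination      - entry.dest
 */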
 +
++/*
++ * Set up the 8259A-master output pin:
++ */
 +#ifndef CONFIG_XEN
-+	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-+#endif
-+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-+		&& !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
-+		tmpid = io_apic_get_unique_id(idx, id);
-+	else
-+		tmpid = id;
-+	if (tmpid == -1) {
-+		nr_ioapics--;
-+		return;
-+	}
-+	mp_ioapics[idx].mpc_apicid = tmpid;
-+	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
-+	
-+	/* 
-+	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
-+	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
-+	 */
-+	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
-+	mp_ioapic_routing[idx].gsi_base = gsi_base;
-+	mp_ioapic_routing[idx].gsi_end = gsi_base + 
-+		io_apic_get_redir_entries(idx);
-+
-+	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
-+		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 
-+		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
-+		mp_ioapic_routing[idx].gsi_base,
-+		mp_ioapic_routing[idx].gsi_end);
++static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
++{
++	struct IO_APIC_route_entry entry;
 +
-+	return;
-+}
++	memset(&entry,0,sizeof(entry));
 +
++	disable_8259A_irq(0);
 +
-+void __init mp_override_legacy_irq (
-+	u8			bus_irq,
-+	u8			polarity, 
-+	u8			trigger, 
-+	u32			gsi)
-+{
-+	struct mpc_config_intsrc intsrc;
-+	int			ioapic = -1;
-+	int			pin = -1;
++	/* mask LVT0 */
++	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
 +
-+	/* 
-+	 * Convert 'gsi' to 'ioapic.pin'.
++	/*
++	 * We use logical delivery to get the timer IRQ
++	 * to the first CPU.
 +	 */
-+	ioapic = mp_find_ioapic(gsi);
-+	if (ioapic < 0)
-+		return;
-+	pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
++	entry.dest_mode = INT_DEST_MODE;
++	entry.mask = 0;					/* unmask IRQ now */
++	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++	entry.delivery_mode = INT_DELIVERY_MODE;
++	entry.polarity = 0;
++	entry.trigger = 0;
++	entry.vector = vector;
 +
 +	/*
-+	 * TBD: This check is for faulty timer entries, where the override
-+	 *      erroneously sets the trigger to level, resulting in a HUGE 
-+	 *      increase of timer interrupts!
++	 * The timer IRQ doesn't have to know that behind the
++	 * scenes we have an 8259A-master in AEOI mode ...
 +	 */
-+	if ((bus_irq == 0) && (trigger == 3))
-+		trigger = 1;
++	irq_desc[0].chip = &ioapic_chip;
++	set_irq_handler(0, handle_edge_irq);
 +
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqtype = mp_INT;
-+	intsrc.mpc_irqflag = (trigger << 2) | polarity;
-+	intsrc.mpc_srcbus = MP_ISA_BUS;
-+	intsrc.mpc_srcbusirq = bus_irq;				       /* IRQ */
-+	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	   /* APIC ID */
-+	intsrc.mpc_dstirq = pin;				    /* INTIN# */
-+
-+	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
-+		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-+		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-+		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
-+
-+	mp_irqs[mp_irq_entries] = intsrc;
-+	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+		panic("Max # of irq sources exceeded!\n");
++	/*
++	 * Add it to the IO-APIC irq-routing table:
++	 */
++	ioapic_write_entry(apic, pin, entry);
 +
-+	return;
++	enable_8259A_irq(0);
 +}
 +
-+void __init mp_config_acpi_legacy_irqs (void)
++static inline void UNEXPECTED_IO_APIC(void)
 +{
-+	struct mpc_config_intsrc intsrc;
-+	int			i = 0;
-+	int			ioapic = -1;
-+
-+	/* 
-+	 * Fabricate the legacy ISA bus (bus #31).
-+	 */
-+	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
-+	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++}
 +
-+	/*
-+	 * Older generations of ES7000 have no legacy identity mappings
-+	 */
-+	if (es7000_plat == 1)
-+		return;
++void __init print_IO_APIC(void)
++{
++	int apic, i;
++	union IO_APIC_reg_00 reg_00;
++	union IO_APIC_reg_01 reg_01;
++	union IO_APIC_reg_02 reg_02;
++	union IO_APIC_reg_03 reg_03;
++	unsigned long flags;
 +
-+	/* 
-+	 * Locate the IOAPIC that manages the ISA IRQs (0-15). 
-+	 */
-+	ioapic = mp_find_ioapic(0);
-+	if (ioapic < 0)
++	if (apic_verbosity == APIC_QUIET)
 +		return;
 +
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqflag = 0;					/* Conforming */
-+	intsrc.mpc_srcbus = MP_ISA_BUS;
-+	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
++	for (i = 0; i < nr_ioapics; i++)
++		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
++		       mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
 +
-+	/* 
-+	 * Use the default configuration for the IRQs 0-15.  Unless
-+	 * overridden by (MADT) interrupt source override entries.
++	/*
++	 * We are a bit conservative about what we expect.  We have to
++	 * know about every hardware change ASAP.
 +	 */
-+	for (i = 0; i < 16; i++) {
-+		int idx;
-+
-+		for (idx = 0; idx < mp_irq_entries; idx++) {
-+			struct mpc_config_intsrc *irq = mp_irqs + idx;
++	printk(KERN_INFO "testing the IO APIC.......................\n");
 +
-+			/* Do we already have a mapping for this ISA IRQ? */
-+			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
-+				break;
++	for (apic = 0; apic < nr_ioapics; apic++) {
 +
-+			/* Do we already have a mapping for this IOAPIC pin */
-+			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
-+				(irq->mpc_dstirq == i))
-+				break;
-+		}
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_00.raw = io_apic_read(apic, 0);
++	reg_01.raw = io_apic_read(apic, 1);
++	if (reg_01.bits.version >= 0x10)
++		reg_02.raw = io_apic_read(apic, 2);
++	if (reg_01.bits.version >= 0x20)
++		reg_03.raw = io_apic_read(apic, 3);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
 +
-+		if (idx != mp_irq_entries) {
-+			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
-+			continue;			/* IRQ already used */
-+		}
++	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
++	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
++	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
++	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
++	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);
++	if (reg_00.bits.ID >= get_physical_broadcast())
++		UNEXPECTED_IO_APIC();
++	if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
++		UNEXPECTED_IO_APIC();
 +
-+		intsrc.mpc_irqtype = mp_INT;
-+		intsrc.mpc_srcbusirq = i;		   /* Identity mapped */
-+		intsrc.mpc_dstirq = i;
++	printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
++	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
++	if (	(reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
++		(reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
++		(reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
++		(reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
++		(reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
++		(reg_01.bits.entries != 0x2E) &&
++		(reg_01.bits.entries != 0x3F)
++	)
++		UNEXPECTED_IO_APIC();
 +
-+		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
-+			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-+			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-+			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, 
-+			intsrc.mpc_dstirq);
++	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
++	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
++	if (	(reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
++		(reg_01.bits.version != 0x10) && /* oldest IO-APICs */
++		(reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
++		(reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
++		(reg_01.bits.version != 0x20)    /* Intel P64H (82806 AA) */
++	)
++		UNEXPECTED_IO_APIC();
++	if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
++		UNEXPECTED_IO_APIC();
 +
-+		mp_irqs[mp_irq_entries] = intsrc;
-+		if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+			panic("Max # of irq sources exceeded!\n");
++	/*
++	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
++	 * but the value of reg_02 is read as the previous read register
++	 * value, so ignore it if reg_02 == reg_01.
++	 */
++	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
++		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
++		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
++		if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
++			UNEXPECTED_IO_APIC();
 +	}
-+}
 +
-+#define MAX_GSI_NUM	4096
-+
-+int mp_register_gsi (u32 gsi, int triggering, int polarity)
-+{
-+	int			ioapic = -1;
-+	int			ioapic_pin = 0;
-+	int			idx, bit = 0;
-+	static int		pci_irq = 16;
 +	/*
-+	 * Mapping between Global System Interrupts, which
-+	 * represent all possible interrupts, and IRQs
-+	 * assigned to actual devices.
++	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
++	 * or reg_03, but the value of reg_0[23] is read as the previous read
++	 * register value, so ignore it if reg_03 == reg_0[12].
 +	 */
-+	static int		gsi_to_irq[MAX_GSI_NUM];
-+
-+	/* Don't set up the ACPI SCI because it's already set up */
-+	if (acpi_fadt.sci_int == gsi)
-+		return gsi;
-+
-+	ioapic = mp_find_ioapic(gsi);
-+	if (ioapic < 0) {
-+		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
-+		return gsi;
++	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
++	    reg_03.raw != reg_01.raw) {
++		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
++		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
++		if (reg_03.bits.__reserved_1)
++			UNEXPECTED_IO_APIC();
 +	}
 +
-+	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
++	printk(KERN_DEBUG ".... IRQ redirection table:\n");
++
++	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
++			  " Stat Dest Deli Vect:   \n");
 +
-+	if (ioapic_renumber_irq)
-+		gsi = ioapic_renumber_irq(ioapic, gsi);
++	for (i = 0; i <= reg_01.bits.entries; i++) {
++		struct IO_APIC_route_entry entry;
 +
-+	/* 
-+	 * Avoid pin reprogramming.  PRTs typically include entries  
-+	 * with redundant pin->gsi mappings (but unique PCI devices);
-+	 * we only program the IOAPIC on the first.
-+	 */
-+	bit = ioapic_pin % 32;
-+	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
-+	if (idx > 3) {
-+		printk(KERN_ERR "Invalid reference to IOAPIC pin "
-+			"%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 
-+			ioapic_pin);
-+		return gsi;
-+	}
-+	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
-+		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
-+			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-+		return gsi_to_irq[gsi];
-+	}
++		entry = ioapic_read_entry(apic, i);
 +
-+	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++		printk(KERN_DEBUG " %02x %03X %02X  ",
++			i,
++			entry.dest.logical.logical_dest,
++			entry.dest.physical.physical_dest
++		);
 +
-+	if (triggering == ACPI_LEVEL_SENSITIVE) {
-+		/*
-+		 * For PCI devices assign IRQs in order, avoiding gaps
-+		 * due to unused I/O APIC pins.
-+		 */
-+		int irq = gsi;
-+		if (gsi < MAX_GSI_NUM) {
-+			/*
-+			 * Retain the VIA chipset work-around (gsi > 15), but
-+			 * avoid a problem where the 8254 timer (IRQ0) is setup
-+			 * via an override (so it's not on pin 0 of the ioapic),
-+			 * and at the same time, the pin 0 interrupt is a PCI
-+			 * type.  The gsi > 15 test could cause these two pins
-+			 * to be shared as IRQ0, and they are not shareable.
-+			 * So test for this condition, and if necessary, avoid
-+			 * the pin collision.
-+			 */
-+			if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
-+				gsi = pci_irq++;
-+			/*
-+			 * Don't assign IRQ used by ACPI SCI
-+			 */
-+			if (gsi == acpi_fadt.sci_int)
-+				gsi = pci_irq++;
-+			gsi_to_irq[irq] = gsi;
-+		} else {
-+			printk(KERN_ERR "GSI %u is too high\n", gsi);
-+			return gsi;
++		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
++			entry.mask,
++			entry.trigger,
++			entry.irr,
++			entry.polarity,
++			entry.delivery_status,
++			entry.dest_mode,
++			entry.delivery_mode,
++			entry.vector
++		);
++	}
++	}
++	printk(KERN_DEBUG "IRQ to pin mappings:\n");
++	for (i = 0; i < NR_IRQS; i++) {
++		struct irq_pin_list *entry = irq_2_pin + i;
++		if (entry->pin < 0)
++			continue;
++		printk(KERN_DEBUG "IRQ%d ", i);
++		for (;;) {
++			printk("-> %d:%d", entry->apic, entry->pin);
++			if (!entry->next)
++				break;
++			entry = irq_2_pin + entry->next;
 +		}
++		printk("\n");
 +	}
 +
-+	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-+		    triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
-+		    polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-+	return gsi;
-+}
-+
-+#endif /* CONFIG_X86_IO_APIC */
-+#endif /* CONFIG_ACPI */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/pci-dma-xen.c linux-2.6.18-xen/arch/i386/kernel/pci-dma-xen.c
---- linux-2.6.18.3/arch/i386/kernel/pci-dma-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/pci-dma-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,379 @@
-+/*
-+ * Dynamic DMA mapping support.
-+ *
-+ * On i386 there is no hardware dynamic DMA address translation,
-+ * so consistent alloc/free are merely page allocation/freeing.
-+ * The rest of the dynamic DMA mapping interface is implemented
-+ * in asm/pci.h.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/mm.h>
-+#include <linux/string.h>
-+#include <linux/pci.h>
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <asm/io.h>
-+#include <xen/balloon.h>
-+#include <asm/tlbflush.h>
-+#include <asm-i386/mach-xen/asm/swiotlb.h>
-+#include <asm/bug.h>
-+
-+#ifdef __x86_64__
-+int iommu_merge __read_mostly = 0;
-+EXPORT_SYMBOL(iommu_merge);
-+
-+dma_addr_t bad_dma_address __read_mostly;
-+EXPORT_SYMBOL(bad_dma_address);
-+
-+/* This tells the BIO block layer to assume merging. Default to off
-+   because we cannot guarantee merging later. */
-+int iommu_bio_merge __read_mostly = 0;
-+EXPORT_SYMBOL(iommu_bio_merge);
-+
-+int iommu_sac_force __read_mostly = 0;
-+EXPORT_SYMBOL(iommu_sac_force);
-+
-+int no_iommu __read_mostly;
-+#ifdef CONFIG_IOMMU_DEBUG
-+int panic_on_overflow __read_mostly = 1;
-+int force_iommu __read_mostly = 1;
-+#else
-+int panic_on_overflow __read_mostly = 0;
-+int force_iommu __read_mostly= 0;
-+#endif
-+
-+/* Set this to 1 if there is a HW IOMMU in the system */
-+int iommu_detected __read_mostly = 0;
-+
-+void __init pci_iommu_alloc(void)
-+{
-+	/*
-+	 * The order of these functions is important for
-+	 * fall-back/fail-over reasons
-+	 */
-+#ifdef CONFIG_IOMMU
-+	iommu_hole_init();
-+#endif
-+
-+#ifdef CONFIG_CALGARY_IOMMU
-+#include <asm/calgary.h>
-+	detect_calgary();
-+#endif
-+
-+#ifdef CONFIG_SWIOTLB
-+	pci_swiotlb_init();
-+#endif
-+}
++	printk(KERN_INFO ".................................... done.\n");
 +
-+__init int iommu_setup(char *p)
-+{
-+    return 1;
++	return;
 +}
-+#endif
-+
-+struct dma_coherent_mem {
-+	void		*virt_base;
-+	u32		device_base;
-+	int		size;
-+	int		flags;
-+	unsigned long	*bitmap;
-+};
 +
-+#define IOMMU_BUG_ON(test)				\
-+do {							\
-+	if (unlikely(test)) {				\
-+		printk(KERN_ALERT "Fatal DMA error! "	\
-+		       "Please use 'swiotlb=force'\n");	\
-+		BUG();					\
-+	}						\
-+} while (0)
++#if 0
 +
-+int
-+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+	   enum dma_data_direction direction)
++static void print_APIC_bitfield (int base)
 +{
-+	int i, rc;
++	unsigned int v;
++	int i, j;
 +
-+	if (direction == DMA_NONE)
-+		BUG();
-+	WARN_ON(nents == 0 || sg[0].length == 0);
++	if (apic_verbosity == APIC_QUIET)
++		return;
 +
-+	if (swiotlb) {
-+		rc = swiotlb_map_sg(hwdev, sg, nents, direction);
-+	} else {
-+		for (i = 0; i < nents; i++ ) {
-+			sg[i].dma_address =
-+				page_to_bus(sg[i].page) + sg[i].offset;
-+			sg[i].dma_length  = sg[i].length;
-+			BUG_ON(!sg[i].page);
-+			IOMMU_BUG_ON(address_needs_mapping(
-+				hwdev, sg[i].dma_address));
++	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
++	for (i = 0; i < 8; i++) {
++		v = apic_read(base + i*0x10);
++		for (j = 0; j < 32; j++) {
++			if (v & (1<<j))
++				printk("1");
++			else
++				printk("0");
 +		}
-+		rc = nents;
++		printk("\n");
 +	}
-+
-+	flush_write_buffers();
-+	return rc;
 +}
-+EXPORT_SYMBOL(dma_map_sg);
 +
-+void
-+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+	     enum dma_data_direction direction)
++void /*__init*/ print_local_APIC(void * dummy)
 +{
-+	BUG_ON(direction == DMA_NONE);
-+	if (swiotlb)
-+		swiotlb_unmap_sg(hwdev, sg, nents, direction);
-+}
-+EXPORT_SYMBOL(dma_unmap_sg);
++	unsigned int v, ver, maxlvt;
 +
-+/*
-+ * XXX This file is also used by xenLinux/ia64. 
-+ * "defined(__i386__) || defined (__x86_64__)" means "!defined(__ia64__)".
-+ * This #if workaround should be removed once this file is merged back into
-+ * i386's pci-dma or is moved to drivers/xen/core.
-+ */
-+#if defined(__i386__) || defined(__x86_64__)
-+dma_addr_t
-+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-+	     size_t size, enum dma_data_direction direction)
-+{
-+	dma_addr_t dma_addr;
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
++		smp_processor_id(), hard_smp_processor_id());
++	v = apic_read(APIC_ID);
++	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
++	v = apic_read(APIC_LVR);
++	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
++	ver = GET_APIC_VERSION(v);
++	maxlvt = get_maxlvt();
 +
-+	BUG_ON(direction == DMA_NONE);
++	v = apic_read(APIC_TASKPRI);
++	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
 +
-+	if (swiotlb) {
-+		dma_addr = swiotlb_map_page(
-+			dev, page, offset, size, direction);
-+	} else {
-+		dma_addr = page_to_bus(page) + offset;
-+		IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
++	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
++		v = apic_read(APIC_ARBPRI);
++		printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
++			v & APIC_ARBPRI_MASK);
++		v = apic_read(APIC_PROCPRI);
++		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
 +	}
 +
-+	return dma_addr;
-+}
-+EXPORT_SYMBOL(dma_map_page);
++	v = apic_read(APIC_EOI);
++	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
++	v = apic_read(APIC_RRR);
++	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
++	v = apic_read(APIC_LDR);
++	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
++	v = apic_read(APIC_DFR);
++	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
++	v = apic_read(APIC_SPIV);
++	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
 +
-+void
-+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-+	       enum dma_data_direction direction)
-+{
-+	BUG_ON(direction == DMA_NONE);
-+	if (swiotlb)
-+		swiotlb_unmap_page(dev, dma_address, size, direction);
-+}
-+EXPORT_SYMBOL(dma_unmap_page);
-+#endif /* defined(__i386__) || defined(__x86_64__) */
++	printk(KERN_DEBUG "... APIC ISR field:\n");
++	print_APIC_bitfield(APIC_ISR);
++	printk(KERN_DEBUG "... APIC TMR field:\n");
++	print_APIC_bitfield(APIC_TMR);
++	printk(KERN_DEBUG "... APIC IRR field:\n");
++	print_APIC_bitfield(APIC_IRR);
 +
-+int
-+dma_mapping_error(dma_addr_t dma_addr)
-+{
-+	if (swiotlb)
-+		return swiotlb_dma_mapping_error(dma_addr);
-+	return 0;
-+}
-+EXPORT_SYMBOL(dma_mapping_error);
++	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
++		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
++			apic_write(APIC_ESR, 0);
++		v = apic_read(APIC_ESR);
++		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++	}
 +
-+int
-+dma_supported(struct device *dev, u64 mask)
-+{
-+	if (swiotlb)
-+		return swiotlb_dma_supported(dev, mask);
-+	/*
-+	 * By default we'll BUG when an infeasible DMA is requested, and
-+	 * request swiotlb=force (see IOMMU_BUG_ON).
-+	 */
-+	return 1;
-+}
-+EXPORT_SYMBOL(dma_supported);
++	v = apic_read(APIC_ICR);
++	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
++	v = apic_read(APIC_ICR2);
++	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
 +
-+void *dma_alloc_coherent(struct device *dev, size_t size,
-+			   dma_addr_t *dma_handle, gfp_t gfp)
-+{
-+	void *ret;
-+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-+	unsigned int order = get_order(size);
-+	unsigned long vstart;
-+	/* ignore region specifiers */
-+	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
++	v = apic_read(APIC_LVTT);
++	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
 +
-+	if (mem) {
-+		int page = bitmap_find_free_region(mem->bitmap, mem->size,
-+						     order);
-+		if (page >= 0) {
-+			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
-+			ret = mem->virt_base + (page << PAGE_SHIFT);
-+			memset(ret, 0, size);
-+			return ret;
-+		}
-+		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-+			return NULL;
++	if (maxlvt > 3) {                       /* PC is LVT#4. */
++		v = apic_read(APIC_LVTPC);
++		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
 +	}
++	v = apic_read(APIC_LVT0);
++	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
++	v = apic_read(APIC_LVT1);
++	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
 +
-+	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-+		gfp |= GFP_DMA;
-+
-+	vstart = __get_free_pages(gfp, order);
-+	ret = (void *)vstart;
-+
-+	if (ret != NULL) {
-+		/* NB. Hardcode 31 address bits for now: aacraid limitation. */
-+		if (xen_create_contiguous_region(vstart, order, 31) != 0) {
-+			free_pages(vstart, order);
-+			return NULL;
-+		}
-+		memset(ret, 0, size);
-+		*dma_handle = virt_to_bus(ret);
++	if (maxlvt > 2) {			/* ERR is LVT#3. */
++		v = apic_read(APIC_LVTERR);
++		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
 +	}
-+	return ret;
++
++	v = apic_read(APIC_TMICT);
++	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
++	v = apic_read(APIC_TMCCT);
++	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
++	v = apic_read(APIC_TDCR);
++	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
++	printk("\n");
 +}
-+EXPORT_SYMBOL(dma_alloc_coherent);
 +
-+void dma_free_coherent(struct device *dev, size_t size,
-+			 void *vaddr, dma_addr_t dma_handle)
++void print_all_local_APICs (void)
 +{
-+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-+	int order = get_order(size);
-+	
-+	if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-+
-+		bitmap_release_region(mem->bitmap, page, order);
-+	} else {
-+		xen_destroy_contiguous_region((unsigned long)vaddr, order);
-+		free_pages((unsigned long)vaddr, order);
-+	}
++	on_each_cpu(print_local_APIC, NULL, 1, 1);
 +}
-+EXPORT_SYMBOL(dma_free_coherent);
 +
-+#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-+				dma_addr_t device_addr, size_t size, int flags)
++void /*__init*/ print_PIC(void)
 +{
-+	void __iomem *mem_base;
-+	int pages = size >> PAGE_SHIFT;
-+	int bitmap_size = (pages + 31)/32;
++	unsigned int v;
++	unsigned long flags;
 +
-+	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
-+		goto out;
-+	if (!size)
-+		goto out;
-+	if (dev->dma_mem)
-+		goto out;
++	if (apic_verbosity == APIC_QUIET)
++		return;
 +
-+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
++	printk(KERN_DEBUG "\nprinting PIC contents\n");
 +
-+	mem_base = ioremap(bus_addr, size);
-+	if (!mem_base)
-+		goto out;
++	spin_lock_irqsave(&i8259A_lock, flags);
 +
-+	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-+	if (!dev->dma_mem)
-+		goto out;
-+	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
-+	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
-+	if (!dev->dma_mem->bitmap)
-+		goto free1_out;
-+	memset(dev->dma_mem->bitmap, 0, bitmap_size);
++	v = inb(0xa1) << 8 | inb(0x21);
++	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
 +
-+	dev->dma_mem->virt_base = mem_base;
-+	dev->dma_mem->device_base = device_addr;
-+	dev->dma_mem->size = pages;
-+	dev->dma_mem->flags = flags;
++	v = inb(0xa0) << 8 | inb(0x20);
++	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
 +
-+	if (flags & DMA_MEMORY_MAP)
-+		return DMA_MEMORY_MAP;
++	outb(0x0b,0xa0);
++	outb(0x0b,0x20);
++	v = inb(0xa0) << 8 | inb(0x20);
++	outb(0x0a,0xa0);
++	outb(0x0a,0x20);
 +
-+	return DMA_MEMORY_IO;
++	spin_unlock_irqrestore(&i8259A_lock, flags);
 +
-+ free1_out:
-+	kfree(dev->dma_mem->bitmap);
-+ out:
-+	return 0;
-+}
-+EXPORT_SYMBOL(dma_declare_coherent_memory);
++	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
 +
-+void dma_release_declared_memory(struct device *dev)
-+{
-+	struct dma_coherent_mem *mem = dev->dma_mem;
-+	
-+	if(!mem)
-+		return;
-+	dev->dma_mem = NULL;
-+	iounmap(mem->virt_base);
-+	kfree(mem->bitmap);
-+	kfree(mem);
++	v = inb(0x4d1) << 8 | inb(0x4d0);
++	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
 +}
-+EXPORT_SYMBOL(dma_release_declared_memory);
-+
-+void *dma_mark_declared_memory_occupied(struct device *dev,
-+					dma_addr_t device_addr, size_t size)
-+{
-+	struct dma_coherent_mem *mem = dev->dma_mem;
-+	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+	int pos, err;
 +
-+	if (!mem)
-+		return ERR_PTR(-EINVAL);
++#endif  /*  0  */
 +
-+	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-+	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
-+	if (err != 0)
-+		return ERR_PTR(err);
-+	return mem->virt_base + (pos << PAGE_SHIFT);
-+}
-+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-+#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
++#else
++void __init print_IO_APIC(void) { }
++#endif /* !CONFIG_XEN */
 +
-+dma_addr_t
-+dma_map_single(struct device *dev, void *ptr, size_t size,
-+	       enum dma_data_direction direction)
++static void __init enable_IO_APIC(void)
 +{
-+	dma_addr_t dma;
++	union IO_APIC_reg_01 reg_01;
++	int i8259_apic, i8259_pin;
++	int i, apic;
++	unsigned long flags;
 +
-+	if (direction == DMA_NONE)
-+		BUG();
-+	WARN_ON(size == 0);
++	for (i = 0; i < PIN_MAP_SIZE; i++) {
++		irq_2_pin[i].pin = -1;
++		irq_2_pin[i].next = 0;
++	}
++	if (!pirqs_enabled)
++		for (i = 0; i < MAX_PIRQS; i++)
++			pirq_entries[i] = -1;
 +
-+	if (swiotlb) {
-+		dma = swiotlb_map_single(dev, ptr, size, direction);
-+	} else {
-+		dma = virt_to_bus(ptr);
-+		IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
-+		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
++	/*
++	 * The number of IO-APIC IRQ registers (== #pins):
++	 */
++	for (apic = 0; apic < nr_ioapics; apic++) {
++		spin_lock_irqsave(&ioapic_lock, flags);
++		reg_01.raw = io_apic_read(apic, 1);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
 +	}
++	for (apic = 0; apic < nr_ioapics; apic++) {
++		int pin;
++		/* See if any of the pins is in ExtINT mode */
++		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++			struct IO_APIC_route_entry entry;
++			entry = ioapic_read_entry(apic, pin);
 +
-+	flush_write_buffers();
-+	return dma;
-+}
-+EXPORT_SYMBOL(dma_map_single);
 +
-+void
-+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-+		 enum dma_data_direction direction)
-+{
-+	if (direction == DMA_NONE)
-+		BUG();
-+	if (swiotlb)
-+		swiotlb_unmap_single(dev, dma_addr, size, direction);
-+}
-+EXPORT_SYMBOL(dma_unmap_single);
++			/* If the interrupt line is enabled and in ExtInt mode
++			 * I have found the pin where the i8259 is connected.
++			 */
++			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
++				ioapic_i8259.apic = apic;
++				ioapic_i8259.pin  = pin;
++				goto found_i8259;
++			}
++		}
++	}
++ found_i8259:
++	/* Look to see if the MP table has reported the ExtINT */
++	/* If we could not find the appropriate pin by looking at the ioapic
++	 * the i8259 probably is not connected to the ioapic, but give the
++	 * mptable a chance anyway.
++	 */
++	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
++	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
++	/* Trust the MP table if nothing is setup in the hardware */
++	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
++		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
++		ioapic_i8259.pin  = i8259_pin;
++		ioapic_i8259.apic = i8259_apic;
++	}
++	/* Complain if the MP table and the hardware disagree */
++	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
++		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
++	{
++		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
++	}
 +
-+void
-+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-+			enum dma_data_direction direction)
-+{
-+	if (swiotlb)
-+		swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
++	/*
++	 * Do not trust the IO-APIC being empty at bootup
++	 */
++	clear_IO_APIC();
 +}
-+EXPORT_SYMBOL(dma_sync_single_for_cpu);
 +
-+void
-+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-+                           enum dma_data_direction direction)
++/*
++ * Not an __init, needed by the reboot code
++ */
++void disable_IO_APIC(void)
 +{
-+	if (swiotlb)
-+		swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
++	/*
++	 * Clear the IO-APIC before rebooting:
++	 */
++	clear_IO_APIC();
++
++#ifndef CONFIG_XEN
++	/*
++	 * If the i8259 is routed through an IOAPIC
++	 * Put that IOAPIC in virtual wire mode
++	 * so legacy interrupts can be delivered.
++	 */
++	if (ioapic_i8259.pin != -1) {
++		struct IO_APIC_route_entry entry;
++
++		memset(&entry, 0, sizeof(entry));
++		entry.mask            = 0; /* Enabled */
++		entry.trigger         = 0; /* Edge */
++		entry.irr             = 0;
++		entry.polarity        = 0; /* High */
++		entry.delivery_status = 0;
++		entry.dest_mode       = 0; /* Physical */
++		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
++		entry.vector          = 0;
++		entry.dest.physical.physical_dest =
++					GET_APIC_ID(apic_read(APIC_ID));
++
++		/*
++		 * Add it to the IO-APIC irq-routing table:
++		 */
++		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
++	}
++	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
++#endif
 +}
-+EXPORT_SYMBOL(dma_sync_single_for_device);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/process-xen.c linux-2.6.18-xen/arch/i386/kernel/process-xen.c
---- linux-2.6.18.3/arch/i386/kernel/process-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/process-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,812 @@
++
 +/*
-+ *  linux/arch/i386/kernel/process.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
++ * function to set the IO-APIC physical IDs based on the
++ * values stored in the MPC table.
 + *
-+ *  Pentium III FXSR, SSE support
-+ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ * by Matt Domsch <Matt_Domsch at dell.com>  Tue Dec 21 12:25:05 CST 1999
 + */
 +
-+/*
-+ * This file handles the architecture-dependent parts of process handling..
-+ */
++#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
++static void __init setup_ioapic_ids_from_mpc(void)
++{
++	union IO_APIC_reg_00 reg_00;
++	physid_mask_t phys_id_present_map;
++	int apic;
++	int i;
++	unsigned char old_id;
++	unsigned long flags;
 +
-+#include <stdarg.h>
++	/*
++	 * Don't check I/O APIC IDs for xAPIC systems.  They have
++	 * no meaning without the serial APIC bus.
++	 */
++	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
++		return;
++	/*
++	 * This is broken; anything with a real cpu count has to
++	 * circumvent this idiocy regardless.
++	 */
++	phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
 +
-+#include <linux/cpu.h>
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/elfcore.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/user.h>
-+#include <linux/a.out.h>
-+#include <linux/interrupt.h>
-+#include <linux/utsname.h>
-+#include <linux/delay.h>
-+#include <linux/reboot.h>
-+#include <linux/init.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/module.h>
-+#include <linux/kallsyms.h>
-+#include <linux/ptrace.h>
-+#include <linux/random.h>
++	/*
++	 * Set the IOAPIC ID to the value stored in the MPC table.
++	 */
++	for (apic = 0; apic < nr_ioapics; apic++) {
 +
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/ldt.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/desc.h>
-+#include <asm/vm86.h>
-+#ifdef CONFIG_MATH_EMULATION
-+#include <asm/math_emu.h>
-+#endif
++		/* Read the register 0 value */
++		spin_lock_irqsave(&ioapic_lock, flags);
++		reg_00.raw = io_apic_read(apic, 0);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++		
++		old_id = mp_ioapics[apic].mpc_apicid;
++
++		if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
++			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
++				apic, mp_ioapics[apic].mpc_apicid);
++			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++				reg_00.bits.ID);
++			mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
++		}
++
++		/*
++		 * Sanity check, is the ID really free? Every APIC in a
++		 * system must have a unique ID or we get lots of nice
++		 * 'stuck on smp_invalidate_needed IPI wait' messages.
++		 */
++		if (check_apicid_used(phys_id_present_map,
++					mp_ioapics[apic].mpc_apicid)) {
++			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
++				apic, mp_ioapics[apic].mpc_apicid);
++			for (i = 0; i < get_physical_broadcast(); i++)
++				if (!physid_isset(i, phys_id_present_map))
++					break;
++			if (i >= get_physical_broadcast())
++				panic("Max APIC ID exceeded!\n");
++			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++				i);
++			physid_set(i, phys_id_present_map);
++			mp_ioapics[apic].mpc_apicid = i;
++		} else {
++			physid_mask_t tmp;
++			tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
++			apic_printk(APIC_VERBOSE, "Setting %d in the "
++					"phys_id_present_map\n",
++					mp_ioapics[apic].mpc_apicid);
++			physids_or(phys_id_present_map, phys_id_present_map, tmp);
++		}
 +
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/vcpu.h>
-+#include <xen/cpu_hotplug.h>
 +
-+#include <linux/err.h>
++		/*
++		 * We need to adjust the IRQ routing table
++		 * if the ID changed.
++		 */
++		if (old_id != mp_ioapics[apic].mpc_apicid)
++			for (i = 0; i < mp_irq_entries; i++)
++				if (mp_irqs[i].mpc_dstapic == old_id)
++					mp_irqs[i].mpc_dstapic
++						= mp_ioapics[apic].mpc_apicid;
 +
-+#include <asm/tlbflush.h>
-+#include <asm/cpu.h>
++		/*
++		 * Read the right value from the MPC table and
++		 * write it into the ID register.
++		 */
++		apic_printk(APIC_VERBOSE, KERN_INFO
++			"...changing IO-APIC physical APIC ID to %d ...",
++			mp_ioapics[apic].mpc_apicid);
 +
-+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
++		reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
++		spin_lock_irqsave(&ioapic_lock, flags);
++		io_apic_write(apic, 0, reg_00.raw);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
 +
-+static int hlt_counter;
++		/*
++		 * Sanity check
++		 */
++		spin_lock_irqsave(&ioapic_lock, flags);
++		reg_00.raw = io_apic_read(apic, 0);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++		if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
++			printk("could not set ID!\n");
++		else
++			apic_printk(APIC_VERBOSE, " ok.\n");
++	}
++}
++#else
++static void __init setup_ioapic_ids_from_mpc(void) { }
++#endif
 +
-+unsigned long boot_option_idle_override = 0;
-+EXPORT_SYMBOL(boot_option_idle_override);
++static int no_timer_check __initdata;
 +
-+/*
-+ * Return saved PC of a blocked thread.
-+ */
-+unsigned long thread_saved_pc(struct task_struct *tsk)
++static int __init notimercheck(char *s)
 +{
-+	return ((unsigned long *)tsk->thread.esp)[3];
++	no_timer_check = 1;
++	return 1;
 +}
++__setup("no_timer_check", notimercheck);
 +
++#ifndef CONFIG_XEN
 +/*
-+ * Powermanagement idle function, if any..
++ * There is a nasty bug in some older SMP boards: their mptable lies
++ * about the timer IRQ. We do the following to work around the situation:
++ *
++ *	- timer IRQ defaults to IO-APIC IRQ
++ *	- if this function detects that timer IRQs are defunct, then we fall
++ *	  back to ISA timer IRQs
 + */
-+void (*pm_idle)(void);
-+EXPORT_SYMBOL(pm_idle);
-+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-+
-+void disable_hlt(void)
++int __init timer_irq_works(void)
 +{
-+	hlt_counter++;
-+}
++	unsigned long t1 = jiffies;
 +
-+EXPORT_SYMBOL(disable_hlt);
++	if (no_timer_check)
++		return 1;
 +
-+void enable_hlt(void)
-+{
-+	hlt_counter--;
++	local_irq_enable();
++	/* Let ten ticks pass... */
++	mdelay((10 * 1000) / HZ);
++
++	/*
++	 * Expect a few ticks at least, to be sure some possible
++	 * glue logic does not lock up after one or two first
++	 * ticks in a non-ExtINT mode.  Also the local APIC
++	 * might have cached one ExtINT interrupt.  Finally, at
++	 * least one tick may be lost due to delays.
++	 */
++	if (jiffies - t1 > 4)
++		return 1;
++
++	return 0;
 +}
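++
++/*
++ * Worked example (added for clarity, not in the original patch): with
++ * HZ == 250 the mdelay above spins for (10 * 1000) / 250 = 40 ms, i.e.
++ * ten timer ticks; the 'jiffies - t1 > 4' test then tolerates roughly
++ * half of them being swallowed by glue logic or a cached ExtINT.
++ */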
 +
-+EXPORT_SYMBOL(enable_hlt);
++/*
++ * In the SMP+IOAPIC case it might happen that an unspecified number
++ * of pending IRQ events is left unhandled. These cases are very rare,
++ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
++ * better to do it this way as thus we do not have to be aware of
++ * 'pending' interrupts in the IRQ path, except at this point.
++ */
++/*
++ * Edge triggered needs to resend any interrupt
++ * that was delayed but this is now handled in the device
++ * independent code.
++ */
 +
-+/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
-+void xen_idle(void)
++/*
++ * Startup quirk:
++ *
++ * Starting up an edge-triggered IO-APIC interrupt is
++ * nasty - we need to make sure that we get the edge.
++ * If it is already asserted for some reason, we need to
++ * return 1 to indicate that it was pending.
++ *
++ * This is not complete - we should be able to fake
++ * an edge even if it isn't on the 8259A...
++ *
++ * (We do this for level-triggered IRQs too - it cannot hurt.)
++ */
++static unsigned int startup_ioapic_irq(unsigned int irq)
 +{
-+	local_irq_disable();
++	int was_pending = 0;
++	unsigned long flags;
 +
-+	if (need_resched())
-+		local_irq_enable();
-+	else {
-+		current_thread_info()->status &= ~TS_POLLING;
-+		smp_mb__after_clear_bit();
-+		safe_halt();
-+		current_thread_info()->status |= TS_POLLING;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	if (irq < 16) {
++		disable_8259A_irq(irq);
++		if (i8259A_irq_pending(irq))
++			was_pending = 1;
 +	}
++	__unmask_IO_APIC_irq(irq);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return was_pending;
 +}
-+#ifdef CONFIG_APM_MODULE
-+EXPORT_SYMBOL(default_idle);
-+#endif
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+extern cpumask_t cpu_initialized;
-+static inline void play_dead(void)
++static void ack_ioapic_irq(unsigned int irq)
 +{
-+	idle_task_exit();
-+	local_irq_disable();
-+	cpu_clear(smp_processor_id(), cpu_initialized);
-+	preempt_enable_no_resched();
-+	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-+	cpu_bringup();
++	move_native_irq(irq);
++	ack_APIC_irq();
 +}
-+#else
-+static inline void play_dead(void)
++
++static void ack_ioapic_quirk_irq(unsigned int irq)
 +{
-+	BUG();
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
++	unsigned long v;
++	int i;
 +
++	move_native_irq(irq);
 +/*
-+ * The idle thread. There's no useful work to be
-+ * done, so just try to conserve power and have a
-+ * low exit latency (ie sit in a loop waiting for
-+ * somebody to say that they'd like to reschedule)
++ * It appears there is an erratum which affects at least version 0x11
++ * of I/O APIC (that's the 82093AA and cores integrated into various
++ * chipsets).  Under certain conditions a level-triggered interrupt is
++ * erroneously delivered as edge-triggered one but the respective IRR
++ * bit gets set nevertheless.  As a result the I/O unit expects an EOI
++ * message but it will never arrive and further interrupts are blocked
++ * from the source.  The exact reason is so far unknown, but the
++ * phenomenon was observed when two consecutive interrupt requests
++ * from a given source get delivered to the same CPU and the source is
++ * temporarily disabled in between.
++ *
++ * A workaround is to simulate an EOI message manually.  We achieve it
++ * by setting the trigger mode to edge and then to level when the edge
++ * trigger mode gets detected in the TMR of a local APIC for a
++ * level-triggered interrupt.  We mask the source for the time of the
++ * operation to prevent an edge-triggered interrupt escaping meanwhile.
++ * The idea is from Manfred Spraul.  --macro
 + */
-+void cpu_idle(void)
-+{
-+	int cpu = smp_processor_id();
++	i = irq_vector[irq];
 +
-+	current_thread_info()->status |= TS_POLLING;
++	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
 +
++	ack_APIC_irq();
 +
-+	/* endless idle loop with no priority at all */
-+	while (1) {
-+		while (!need_resched()) {
++	if (!(v & (1 << (i & 0x1f)))) {
++		atomic_inc(&irq_mis_count);
++		spin_lock(&ioapic_lock);
++		__mask_and_edge_IO_APIC_irq(irq);
++		__unmask_and_level_IO_APIC_irq(irq);
++		spin_unlock(&ioapic_lock);
++	}
++}
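++
++/*
++ * Illustrative sketch, not part of the original patch: the TMR is 256
++ * bits spread over sixteen 32-bit registers spaced 0x10 apart, so the
++ * ((i & ~0x1f) >> 1) above is just 0x10 * (i / 32).  Spelled out:
++ */
++static inline int apic_vector_in_tmr(int vector)
++{
++	unsigned long w = apic_read(APIC_TMR + 0x10 * (vector >> 5));
++
++	return (w >> (vector & 0x1f)) & 1;
++}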
 +
-+			if (__get_cpu_var(cpu_idle_state))
-+				__get_cpu_var(cpu_idle_state) = 0;
++static int ioapic_retrigger_irq(unsigned int irq)
++{
++	send_IPI_self(irq_vector[irq]);
 +
-+			rmb();
++	return 1;
++}
 +
-+			if (cpu_is_offline(cpu))
-+				play_dead();
++static struct irq_chip ioapic_chip __read_mostly = {
++	.name 		= "IO-APIC",
++	.startup 	= startup_ioapic_irq,
++	.mask	 	= mask_IO_APIC_irq,
++	.unmask	 	= unmask_IO_APIC_irq,
++	.ack 		= ack_ioapic_irq,
++	.eoi 		= ack_ioapic_quirk_irq,
++#ifdef CONFIG_SMP
++	.set_affinity 	= set_ioapic_affinity_irq,
++#endif
++	.retrigger	= ioapic_retrigger_irq,
++};
 +
-+			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
-+			xen_idle();
++#endif /* !CONFIG_XEN */
++
++static inline void init_IO_APIC_traps(void)
++{
++	int irq;
++
++	/*
++	 * NOTE! The local APIC isn't very good at handling
++	 * multiple interrupts at the same interrupt level.
++	 * As the interrupt level is determined by taking the
++	 * vector number and shifting that right by 4, we
++	 * want to spread these out a bit so that they don't
++	 * all fall in the same interrupt level.
++	 *
++	 * Also, we've got to be careful not to trash gate
++	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
++	 */
++	for (irq = 0; irq < NR_IRQS ; irq++) {
++		int tmp = irq;
++		if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
++			/*
++			 * Hmm.. We don't have an entry for this,
++			 * so default to an old-fashioned 8259
++			 * interrupt if we can..
++			 */
++			if (irq < 16)
++				make_8259A_irq(irq);
++#ifndef CONFIG_XEN
++			else
++				/* Strange. Oh, well.. */
++				irq_desc[irq].chip = &no_irq_chip;
++#endif
 +		}
-+		preempt_enable_no_resched();
-+		schedule();
-+		preempt_disable();
 +	}
 +}
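++
++/*
++ * Worked example (added for clarity, not in the original patch): the
++ * interrupt level is vector >> 4, so vector 0x31 sits at level 3 and
++ * vector 0x80 at level 8 - which is why gate 0x80, the int 0x80
++ * system call entry, must be left alone above.
++ */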
 +
-+void cpu_idle_wait(void)
++#ifndef CONFIG_XEN
++/*
++ * The local APIC irq-chip implementation:
++ */
++
++static void ack_apic(unsigned int irq)
 +{
-+	unsigned int cpu, this_cpu = get_cpu();
-+	cpumask_t map;
++	ack_APIC_irq();
++}
 +
-+	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-+	put_cpu();
++static void mask_lapic_irq (unsigned int irq)
++{
++	unsigned long v;
 +
-+	cpus_clear(map);
-+	for_each_online_cpu(cpu) {
-+		per_cpu(cpu_idle_state, cpu) = 1;
-+		cpu_set(cpu, map);
-+	}
++	v = apic_read(APIC_LVT0);
++	apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
++}
 +
-+	__get_cpu_var(cpu_idle_state) = 0;
++static void unmask_lapic_irq (unsigned int irq)
++{
++	unsigned long v;
 +
-+	wmb();
-+	do {
-+		ssleep(1);
-+		for_each_online_cpu(cpu) {
-+			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-+				cpu_clear(cpu, map);
-+		}
-+		cpus_and(map, map, cpu_online_map);
-+	} while (!cpus_empty(map));
++	v = apic_read(APIC_LVT0);
++	apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
 +}
-+EXPORT_SYMBOL_GPL(cpu_idle_wait);
 +
-+/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
-+/* Always use xen_idle() instead. */
-+void __devinit select_idle_routine(const struct cpuinfo_x86 *c) {}
++static struct irq_chip lapic_chip __read_mostly = {
++	.name		= "local-APIC-edge",
++	.mask		= mask_lapic_irq,
++	.unmask		= unmask_lapic_irq,
++	.eoi		= ack_apic,
++};
 +
-+void show_regs(struct pt_regs * regs)
++static void setup_nmi (void)
 +{
-+	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
-+
-+	printk("\n");
-+	printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
-+	printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
-+	print_symbol("EIP is at %s\n", regs->eip);
++	/*
++ 	 * Dirty trick to enable the NMI watchdog ...
++	 * We put the 8259A master into AEOI mode and
++	 * unmask on all local APICs LVT0 as NMI.
++	 *
++	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
++	 * is from Maciej W. Rozycki - so we do not have to EOI from
++	 * the NMI handler or the timer interrupt.
++	 */ 
++	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
 +
-+	if (user_mode_vm(regs))
-+		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
-+	printk(" EFLAGS: %08lx    %s  (%s %.*s)\n",
-+	       regs->eflags, print_tainted(), system_utsname.release,
-+	       (int)strcspn(system_utsname.version, " "),
-+	       system_utsname.version);
-+	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
-+		regs->eax,regs->ebx,regs->ecx,regs->edx);
-+	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
-+		regs->esi, regs->edi, regs->ebp);
-+	printk(" DS: %04x ES: %04x\n",
-+		0xffff & regs->xds,0xffff & regs->xes);
++	on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
 +
-+	cr0 = read_cr0();
-+	cr2 = read_cr2();
-+	cr3 = read_cr3();
-+	cr4 = read_cr4_safe();
-+	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
-+	show_trace(NULL, regs, &regs->esp);
++	apic_printk(APIC_VERBOSE, " done.\n");
 +}
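++
++/*
++ * Illustrative note, not in the original patch: "virtual wire" means
++ * the 8259A keeps driving its INT output into LINT0 of every local
++ * APIC; with LVT0 programmed for NMI delivery each timer tick then
++ * arrives as an NMI, which is what the watchdog counts, and AEOI mode
++ * spares us from issuing 8259A EOIs inside the NMI handler.
++ */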
 +
 +/*
-+ * This gets run with %ebx containing the
-+ * function to call, and %edx containing
-+ * the "args".
-+ */
-+extern void kernel_thread_helper(void);
-+__asm__(".section .text\n"
-+	".align 4\n"
-+	"kernel_thread_helper:\n\t"
-+	"movl %edx,%eax\n\t"
-+	"pushl %edx\n\t"
-+	"call *%ebx\n\t"
-+	"pushl %eax\n\t"
-+	"call do_exit\n"
-+	".previous");
-+
-+/*
-+ * Create a kernel thread
++ * This looks a bit hackish, but it's about the only way of sending
++ * a few INTA cycles to 8259As and any associated glue logic.  ICR does
++ * not support the ExtINT mode, unfortunately.  We need to send these
++ * cycles as some i82489DX-based boards have glue logic that keeps the
++ * 8259A interrupt line asserted until INTA.  --macro
 + */
-+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++static inline void unlock_ExtINT_logic(void)
 +{
-+	struct pt_regs regs;
++	int apic, pin, i;
++	struct IO_APIC_route_entry entry0, entry1;
++	unsigned char save_control, save_freq_select;
 +
-+	memset(&regs, 0, sizeof(regs));
++	pin  = find_isa_irq_pin(8, mp_INT);
++	if (pin == -1) {
++		WARN_ON_ONCE(1);
++		return;
++	}
++	apic = find_isa_irq_apic(8, mp_INT);
++	if (apic == -1) {
++		WARN_ON_ONCE(1);
++		return;
++	}
 +
-+	regs.ebx = (unsigned long) fn;
-+	regs.edx = (unsigned long) arg;
++	entry0 = ioapic_read_entry(apic, pin);
++	clear_IO_APIC_pin(apic, pin);
 +
-+	regs.xds = __USER_DS;
-+	regs.xes = __USER_DS;
-+	regs.orig_eax = -1;
-+	regs.eip = (unsigned long) kernel_thread_helper;
-+	regs.xcs = GET_KERNEL_CS();
-+	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
++	memset(&entry1, 0, sizeof(entry1));
 +
-+	/* Ok, create the new process.. */
-+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-+}
-+EXPORT_SYMBOL(kernel_thread);
++	entry1.dest_mode = 0;			/* physical delivery */
++	entry1.mask = 0;			/* unmask IRQ now */
++	entry1.dest.physical.physical_dest = hard_smp_processor_id();
++	entry1.delivery_mode = dest_ExtINT;
++	entry1.polarity = entry0.polarity;
++	entry1.trigger = 0;
++	entry1.vector = 0;
 +
-+/*
-+ * Free current thread data structures etc..
-+ */
-+void exit_thread(void)
-+{
-+	/* The process may have allocated an io port bitmap... nuke it. */
-+	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
-+		struct task_struct *tsk = current;
-+		struct thread_struct *t = &tsk->thread;
++	ioapic_write_entry(apic, pin, entry1);
 +
-+		struct physdev_set_iobitmap set_iobitmap = { 0 };
-+		HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
-+		kfree(t->io_bitmap_ptr);
-+		t->io_bitmap_ptr = NULL;
-+		clear_thread_flag(TIF_IO_BITMAP);
++	save_control = CMOS_READ(RTC_CONTROL);
++	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
++		   RTC_FREQ_SELECT);
++	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++
++	i = 100;
++	while (i-- > 0) {
++		mdelay(10);
++		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
++			i -= 10;
 +	}
-+}
 +
-+void flush_thread(void)
-+{
-+	struct task_struct *tsk = current;
++	CMOS_WRITE(save_control, RTC_CONTROL);
++	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++	clear_IO_APIC_pin(apic, pin);
 +
-+	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
-+	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
-+	clear_tsk_thread_flag(tsk, TIF_DEBUG);
-+	/*
-+	 * Forget coprocessor state..
-+	 */
-+	clear_fpu(tsk);
-+	clear_used_math();
++	ioapic_write_entry(apic, pin, entry0);
 +}
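++
++/*
++ * Illustrative note, not in the original patch: rate select 0x6 makes
++ * the RTC fire its periodic interrupt at 32768 >> (6 - 1) = 1024 Hz,
++ * so every 10 ms pass of the loop above should find RTC_PF set and
++ * the loop completes in roughly ten passes instead of a hundred.
++ */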
++#endif /* !CONFIG_XEN */
 +
-+void release_thread(struct task_struct *dead_task)
-+{
-+	BUG_ON(dead_task->mm);
-+	release_vm86_irqs(dead_task);
-+}
++int timer_uses_ioapic_pin_0;
 +
++#ifndef CONFIG_XEN
 +/*
-+ * This gets called before we allocate a new thread and copy
-+ * the current task into it.
++ * This code may look a bit paranoid, but it's supposed to cooperate with
++ * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
++ * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
++ * fanatically on his truly buggy board.
 + */
-+void prepare_to_copy(struct task_struct *tsk)
++static inline void __init check_timer(void)
 +{
-+	unlazy_fpu(tsk);
-+}
++	int apic1, pin1, apic2, pin2;
++	int vector;
 +
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
-+	unsigned long unused,
-+	struct task_struct * p, struct pt_regs * regs)
-+{
-+	struct pt_regs * childregs;
-+	struct task_struct *tsk;
-+	int err;
++	/*
++	 * get/set the timer IRQ vector:
++	 */
++	disable_8259A_irq(0);
++	vector = assign_irq_vector(0);
++	set_intr_gate(vector, interrupt[0]);
 +
-+	childregs = task_pt_regs(p);
-+	*childregs = *regs;
-+	childregs->eax = 0;
-+	childregs->esp = esp;
++	/*
++	 * Subtle: code in do_timer_interrupt() expects an AEOI
++	 * mode for the 8259A whenever interrupts are routed
++	 * through I/O APICs.  Also IRQ0 has to be enabled in
++	 * the 8259A which implies the virtual wire has to be
++	 * disabled in the local APIC.
++	 */
++	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++	init_8259A(1);
++	timer_ack = 1;
++	if (timer_over_8254 > 0)
++		enable_8259A_irq(0);
 +
-+	p->thread.esp = (unsigned long) childregs;
-+	p->thread.esp0 = (unsigned long) (childregs+1);
++	pin1  = find_isa_irq_pin(0, mp_INT);
++	apic1 = find_isa_irq_apic(0, mp_INT);
++	pin2  = ioapic_i8259.pin;
++	apic2 = ioapic_i8259.apic;
 +
-+	p->thread.eip = (unsigned long) ret_from_fork;
++	if (pin1 == 0)
++		timer_uses_ioapic_pin_0 = 1;
 +
-+	savesegment(fs,p->thread.fs);
-+	savesegment(gs,p->thread.gs);
++	printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
++		vector, apic1, pin1, apic2, pin2);
 +
-+	tsk = current;
-+	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
-+		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+		if (!p->thread.io_bitmap_ptr) {
-+			p->thread.io_bitmap_max = 0;
-+			return -ENOMEM;
++	if (pin1 != -1) {
++		/*
++		 * Ok, does IRQ0 through the IOAPIC work?
++		 */
++		unmask_IO_APIC_irq(0);
++		if (timer_irq_works()) {
++			if (nmi_watchdog == NMI_IO_APIC) {
++				disable_8259A_irq(0);
++				setup_nmi();
++				enable_8259A_irq(0);
++			}
++			if (disable_timer_pin_1 > 0)
++				clear_IO_APIC_pin(0, pin1);
++			return;
 +		}
-+		memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
-+			IO_BITMAP_BYTES);
-+		set_tsk_thread_flag(p, TIF_IO_BITMAP);
++		clear_IO_APIC_pin(apic1, pin1);
++		printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
++				"IO-APIC\n");
++	}
++
++	printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
++	if (pin2 != -1) {
++		printk("\n..... (found pin %d) ...", pin2);
++		/*
++		 * legacy devices should be connected to IO APIC #0
++		 */
++		setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
++		if (timer_irq_works()) {
++			printk("works.\n");
++			if (pin1 != -1)
++				replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
++			else
++				add_pin_to_irq(0, apic2, pin2);
++			if (nmi_watchdog == NMI_IO_APIC) {
++				setup_nmi();
++			}
++			return;
++		}
++		/*
++		 * Cleanup, just in case ...
++		 */
++		clear_IO_APIC_pin(apic2, pin2);
++	}
++	printk(" failed.\n");
++
++	if (nmi_watchdog == NMI_IO_APIC) {
++		printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
++		nmi_watchdog = 0;
 +	}
 +
-+	/*
-+	 * Set a new TLS for the child thread?
-+	 */
-+	if (clone_flags & CLONE_SETTLS) {
-+		struct desc_struct *desc;
-+		struct user_desc info;
-+		int idx;
-+
-+		err = -EFAULT;
-+		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
-+			goto out;
-+		err = -EINVAL;
-+		if (LDT_empty(&info))
-+			goto out;
++	printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
 +
-+		idx = info.entry_number;
-+		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+			goto out;
++	disable_8259A_irq(0);
++	set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq,
++				      "fasteoi");
++	apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
++	enable_8259A_irq(0);
 +
-+		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-+		desc->a = LDT_entry_a(&info);
-+		desc->b = LDT_entry_b(&info);
++	if (timer_irq_works()) {
++		printk(" works.\n");
++		return;
 +	}
++	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++	printk(" failed.\n");
 +
-+	p->thread.iopl = current->thread.iopl;
++	printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
 +
-+	err = 0;
-+ out:
-+	if (err && p->thread.io_bitmap_ptr) {
-+		kfree(p->thread.io_bitmap_ptr);
-+		p->thread.io_bitmap_max = 0;
++	timer_ack = 0;
++	init_8259A(0);
++	make_8259A_irq(0);
++	apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
++
++	unlock_ExtINT_logic();
++
++	if (timer_irq_works()) {
++		printk(" works.\n");
++		return;
 +	}
-+	return err;
++	printk(" failed :(.\n");
++	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
++		"report.  Then try booting with the 'noapic' option");
 +}
++#else
++#define check_timer() ((void)0)
++#endif /* CONFIG_XEN */
 +
 +/*
-+ * fill in the user structure for a core dump..
++ *
++ * IRQs that are handled by the PIC in the MPS IOAPIC case.
++ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
++ *   Linux doesn't really care, as it's not actually used
++ *   for any interrupt handling anyway.
 + */
-+void dump_thread(struct pt_regs * regs, struct user * dump)
-+{
-+	int i;
++#define PIC_IRQS	(1 << PIC_CASCADE_IR)
 +
-+/* changed the size calculations - should hopefully work better. lbt */
-+	dump->magic = CMAGIC;
-+	dump->start_code = 0;
-+	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
-+	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
-+	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
-+	dump->u_dsize -= dump->u_tsize;
-+	dump->u_ssize = 0;
-+	for (i = 0; i < 8; i++)
-+		dump->u_debugreg[i] = current->thread.debugreg[i];  
++void __init setup_IO_APIC(void)
++{
++	enable_IO_APIC();
 +
-+	if (dump->start_stack < TASK_SIZE)
-+		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
++	if (acpi_ioapic)
++		io_apic_irqs = ~0;	/* all IRQs go through IOAPIC */
++	else
++		io_apic_irqs = ~PIC_IRQS;
 +
-+	dump->regs.ebx = regs->ebx;
-+	dump->regs.ecx = regs->ecx;
-+	dump->regs.edx = regs->edx;
-+	dump->regs.esi = regs->esi;
-+	dump->regs.edi = regs->edi;
-+	dump->regs.ebp = regs->ebp;
-+	dump->regs.eax = regs->eax;
-+	dump->regs.ds = regs->xds;
-+	dump->regs.es = regs->xes;
-+	savesegment(fs,dump->regs.fs);
-+	savesegment(gs,dump->regs.gs);
-+	dump->regs.orig_eax = regs->orig_eax;
-+	dump->regs.eip = regs->eip;
-+	dump->regs.cs = regs->xcs;
-+	dump->regs.eflags = regs->eflags;
-+	dump->regs.esp = regs->esp;
-+	dump->regs.ss = regs->xss;
++	printk("ENABLING IO-APIC IRQs\n");
 +
-+	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
++	/*
++	 * Set up IO-APIC IRQ routing.
++	 */
++	if (!acpi_ioapic)
++		setup_ioapic_ids_from_mpc();
++#ifndef CONFIG_XEN
++	sync_Arb_IDs();
++#endif
++	setup_IO_APIC_irqs();
++	init_IO_APIC_traps();
++	check_timer();
++	if (!acpi_ioapic)
++		print_IO_APIC();
 +}
-+EXPORT_SYMBOL(dump_thread);
 +
-+/* 
-+ * Capture the user space registers if the task is not running (in user space)
-+ */
-+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++static int __init setup_disable_8254_timer(char *s)
 +{
-+	struct pt_regs ptregs = *task_pt_regs(tsk);
-+	ptregs.xcs &= 0xffff;
-+	ptregs.xds &= 0xffff;
-+	ptregs.xes &= 0xffff;
-+	ptregs.xss &= 0xffff;
-+
-+	elf_core_copy_regs(regs, &ptregs);
-+
++	timer_over_8254 = -1;
 +	return 1;
 +}
++static int __init setup_enable_8254_timer(char *s)
++{
++	timer_over_8254 = 2;
++	return 1;
++}
++
++__setup("disable_8254_timer", setup_disable_8254_timer);
++__setup("enable_8254_timer", setup_enable_8254_timer);
 +
 +/*
-+ * This function selects if the context switch from prev to next
-+ * has to tweak the TSC disable bit in the cr4.
++ *	Called after all the initialization is done. If we didn't find any
++ *	APIC bugs then we can allow the modify fast path.
 + */
-+static inline void disable_tsc(struct task_struct *prev_p,
-+			       struct task_struct *next_p)
++
++static int __init io_apic_bug_finalize(void)
 +{
-+	struct thread_info *prev, *next;
-+
-+	/*
-+	 * gcc should eliminate the ->thread_info dereference if
-+	 * has_secure_computing returns 0 at compile time (SECCOMP=n).
-+	 */
-+	prev = task_thread_info(prev_p);
-+	next = task_thread_info(next_p);
-+
-+	if (has_secure_computing(prev) || has_secure_computing(next)) {
-+		/* slow path here */
-+		if (has_secure_computing(prev) &&
-+		    !has_secure_computing(next)) {
-+			write_cr4(read_cr4() & ~X86_CR4_TSD);
-+		} else if (!has_secure_computing(prev) &&
-+			   has_secure_computing(next))
-+			write_cr4(read_cr4() | X86_CR4_TSD);
++	if (sis_apic_bug == -1)
++		sis_apic_bug = 0;
++	if (is_initial_xendomain()) {
++		dom0_op_t op = { .cmd = DOM0_PLATFORM_QUIRK };
++		op.u.platform_quirk.quirk_id = sis_apic_bug ?
++			QUIRK_IOAPIC_BAD_REGSEL : QUIRK_IOAPIC_GOOD_REGSEL;
++		HYPERVISOR_dom0_op(&op);
 +	}
++	return 0;
 +}
 +
-+/*
-+ *	switch_to(x,yn) should switch tasks from x to y.
-+ *
-+ * We fsave/fwait so that an exception goes off at the right time
-+ * (as a call from the fsave or fwait in effect) rather than to
-+ * the wrong process. Lazy FP saving no longer makes any sense
-+ * with modern CPU's, and this simplifies a lot of things (SMP
-+ * and UP become the same).
-+ *
-+ * NOTE! We used to use the x86 hardware context switching. The
-+ * reason for not using it any more becomes apparent when you
-+ * try to recover gracefully from saved state that is no longer
-+ * valid (stale segment register values in particular). With the
-+ * hardware task-switch, there is no way to fix up bad state in
-+ * a reasonable manner.
-+ *
-+ * The fact that Intel documents the hardware task-switching to
-+ * be slow is a fairly red herring - this code is not noticeably
-+ * faster. However, there _is_ some room for improvement here,
-+ * so the performance issues may eventually be a valid point.
-+ * More important, however, is the fact that this allows us much
-+ * more flexibility.
-+ *
-+ * The return value (in %eax) will be the "prev" task after
-+ * the task-switch, and shows up in ret_from_fork in entry.S,
-+ * for example.
-+ */
-+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+	struct thread_struct *prev = &prev_p->thread,
-+				 *next = &next_p->thread;
-+	int cpu = smp_processor_id();
-+#ifndef CONFIG_X86_NO_TSS
-+	struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+#endif
-+	struct physdev_set_iopl iopl_op;
-+	struct physdev_set_iobitmap iobmp_op;
-+	multicall_entry_t _mcl[8], *mcl = _mcl;
++late_initcall(io_apic_bug_finalize);
 +
-+	/* XEN NOTE: FS/GS saved in switch_mm(), not here. */
++struct sysfs_ioapic_data {
++	struct sys_device dev;
++	struct IO_APIC_route_entry entry[0];
++};
++static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
 +
-+	/*
-+	 * This is basically '__unlazy_fpu', except that we queue a
-+	 * multicall to indicate FPU task switch, rather than
-+	 * synchronously trapping to Xen.
-+	 */
-+	if (prev_p->thread_info->status & TS_USEDFPU) {
-+		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
-+		mcl->op      = __HYPERVISOR_fpu_taskswitch;
-+		mcl->args[0] = 1;
-+		mcl++;
-+	}
-+#if 0 /* lazy fpu sanity check */
-+	else BUG_ON(!(read_cr0() & 8));
-+#endif
++static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
++{
++	struct IO_APIC_route_entry *entry;
++	struct sysfs_ioapic_data *data;
++	int i;
++	
++	data = container_of(dev, struct sysfs_ioapic_data, dev);
++	entry = data->entry;
++	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++)
++		entry[i] = ioapic_read_entry(dev->id, i);
 +
-+	/*
-+	 * Reload esp0.
-+	 * This is load_esp0(tss, next) with a multicall.
-+	 */
-+	mcl->op      = __HYPERVISOR_stack_switch;
-+	mcl->args[0] = __KERNEL_DS;
-+	mcl->args[1] = next->esp0;
-+	mcl++;
++	return 0;
++}
 +
-+	/*
-+	 * Load the per-thread Thread-Local Storage descriptor.
-+	 * This is load_TLS(next, cpu) with multicalls.
-+	 */
-+#define C(i) do {							\
-+	if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||	\
-+		     next->tls_array[i].b != prev->tls_array[i].b)) {	\
-+		mcl->op = __HYPERVISOR_update_descriptor;		\
-+		*(u64 *)&mcl->args[0] =	virt_to_machine(		\
-+			&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
-+		*(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i];	\
-+		mcl++;							\
-+	}								\
-+} while (0)
-+	C(0); C(1); C(2);
-+#undef C
++static int ioapic_resume(struct sys_device *dev)
++{
++	struct IO_APIC_route_entry *entry;
++	struct sysfs_ioapic_data *data;
++	unsigned long flags;
++	union IO_APIC_reg_00 reg_00;
++	int i;
++	
++	data = container_of(dev, struct sysfs_ioapic_data, dev);
++	entry = data->entry;
 +
-+	if (unlikely(prev->iopl != next->iopl)) {
-+		iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3;
-+		mcl->op      = __HYPERVISOR_physdev_op;
-+		mcl->args[0] = PHYSDEVOP_set_iopl;
-+		mcl->args[1] = (unsigned long)&iopl_op;
-+		mcl++;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_00.raw = io_apic_read(dev->id, 0);
++	if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
++		reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
++		io_apic_write(dev->id, 0, reg_00.raw);
 +	}
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++)
++		ioapic_write_entry(dev->id, i, entry[i]);
 +
-+	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
-+		iobmp_op.bitmap   = (char *)next->io_bitmap_ptr;
-+		iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
-+		mcl->op      = __HYPERVISOR_physdev_op;
-+		mcl->args[0] = PHYSDEVOP_set_iobitmap;
-+		mcl->args[1] = (unsigned long)&iobmp_op;
-+		mcl++;
-+	}
++	return 0;
++}
 +
-+	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
++static struct sysdev_class ioapic_sysdev_class = {
++	set_kset_name("ioapic"),
++	.suspend = ioapic_suspend,
++	.resume = ioapic_resume,
++};
 +
-+	/*
-+	 * Restore %fs and %gs if needed.
-+	 *
-+	 * Glibc normally makes %fs be zero, and %gs is one of
-+	 * the TLS segments.
-+	 */
-+	if (unlikely(next->fs))
-+		loadsegment(fs, next->fs);
++static int __init ioapic_init_sysfs(void)
++{
++	struct sys_device * dev;
++	int i, size, error = 0;
 +
-+	if (next->gs)
-+		loadsegment(gs, next->gs);
++	error = sysdev_class_register(&ioapic_sysdev_class);
++	if (error)
++		return error;
 +
-+	/*
-+	 * Now maybe reload the debug registers
-+	 */
-+	if (unlikely(next->debugreg[7])) {
-+		set_debugreg(next->debugreg[0], 0);
-+		set_debugreg(next->debugreg[1], 1);
-+		set_debugreg(next->debugreg[2], 2);
-+		set_debugreg(next->debugreg[3], 3);
-+		/* no 4 and 5 */
-+		set_debugreg(next->debugreg[6], 6);
-+		set_debugreg(next->debugreg[7], 7);
++	for (i = 0; i < nr_ioapics; i++ ) {
++		size = sizeof(struct sys_device) + nr_ioapic_registers[i] 
++			* sizeof(struct IO_APIC_route_entry);
++		mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
++		if (!mp_ioapic_data[i]) {
++			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++			continue;
++		}
++		memset(mp_ioapic_data[i], 0, size);
++		dev = &mp_ioapic_data[i]->dev;
++		dev->id = i; 
++		dev->cls = &ioapic_sysdev_class;
++		error = sysdev_register(dev);
++		if (error) {
++			kfree(mp_ioapic_data[i]);
++			mp_ioapic_data[i] = NULL;
++			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++			continue;
++		}
 +	}
 +
-+	disable_tsc(prev_p, next_p);
-+
-+	return prev_p;
-+}
-+
-+asmlinkage int sys_fork(struct pt_regs regs)
-+{
-+	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++	return 0;
 +}
 +
-+asmlinkage int sys_clone(struct pt_regs regs)
-+{
-+	unsigned long clone_flags;
-+	unsigned long newsp;
-+	int __user *parent_tidptr, *child_tidptr;
-+
-+	clone_flags = regs.ebx;
-+	newsp = regs.ecx;
-+	parent_tidptr = (int __user *)regs.edx;
-+	child_tidptr = (int __user *)regs.edi;
-+	if (!newsp)
-+		newsp = regs.esp;
-+	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
-+}
++device_initcall(ioapic_init_sysfs);
 +
 +/*
-+ * This is trivial, and on the face of it looks like it
-+ * could equally well be done in user mode.
-+ *
-+ * Not so, for quite unobvious reasons - register pressure.
-+ * In user mode vfork() cannot have a stack frame, and if
-+ * done by calling the "clone()" system call directly, you
-+ * do not have enough call-clobbered registers to hold all
-+ * the information you need.
++ * Dynamic irq allocate and deallocation
 + */
-+asmlinkage int sys_vfork(struct pt_regs regs)
++int create_irq(void)
 +{
-+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-+}
++	/* Allocate an unused irq */
++	int irq, new, vector = 0;
++	unsigned long flags;
 +
-+/*
-+ * sys_execve() executes a new program.
-+ */
-+asmlinkage int sys_execve(struct pt_regs regs)
-+{
-+	int error;
-+	char * filename;
++	irq = -ENOSPC;
++	spin_lock_irqsave(&vector_lock, flags);
++	for (new = (NR_IRQS - 1); new >= 0; new--) {
++		if (platform_legacy_irq(new))
++			continue;
++		if (irq_vector[new] != 0)
++			continue;
++		vector = __assign_irq_vector(new);
++		if (likely(vector > 0))
++			irq = new;
++		break;
++	}
++	spin_unlock_irqrestore(&vector_lock, flags);
 +
-+	filename = getname((char __user *) regs.ebx);
-+	error = PTR_ERR(filename);
-+	if (IS_ERR(filename))
-+		goto out;
-+	error = do_execve(filename,
-+			(char __user * __user *) regs.ecx,
-+			(char __user * __user *) regs.edx,
-+			&regs);
-+	if (error == 0) {
-+		task_lock(current);
-+		current->ptrace &= ~PT_DTRACE;
-+		task_unlock(current);
-+		/* Make sure we don't return using sysenter.. */
-+		set_thread_flag(TIF_IRET);
++	if (irq >= 0) {
++#ifndef CONFIG_XEN
++		set_intr_gate(vector, interrupt[irq]);
++#endif
++		dynamic_irq_init(irq);
 +	}
-+	putname(filename);
-+out:
-+	return error;
++	return irq;
 +}
 +
-+#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
-+#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
-+
-+unsigned long get_wchan(struct task_struct *p)
++void destroy_irq(unsigned int irq)
 +{
-+	unsigned long ebp, esp, eip;
-+	unsigned long stack_page;
-+	int count = 0;
-+	if (!p || p == current || p->state == TASK_RUNNING)
-+		return 0;
-+	stack_page = (unsigned long)task_stack_page(p);
-+	esp = p->thread.esp;
-+	if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
-+		return 0;
-+	/* include/asm-i386/system.h:switch_to() pushes ebp last. */
-+	ebp = *(unsigned long *) esp;
-+	do {
-+		if (ebp < stack_page || ebp > top_ebp+stack_page)
-+			return 0;
-+		eip = *(unsigned long *) (ebp+4);
-+		if (!in_sched_functions(eip))
-+			return eip;
-+		ebp = *(unsigned long *) ebp;
-+	} while (count++ < 16);
-+	return 0;
++	unsigned long flags;
++
++	dynamic_irq_cleanup(irq);
++
++	spin_lock_irqsave(&vector_lock, flags);
++	irq_vector[irq] = 0;
++	spin_unlock_irqrestore(&vector_lock, flags);
 +}
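++
++/*
++ * Usage sketch (illustrative, not part of the original patch):
++ *
++ *	int irq = create_irq();
++ *	if (irq < 0)
++ *		return irq;	(-ENOSPC: no free vector left)
++ *	...
++ *	destroy_irq(irq);	(releases the vector again)
++ */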
 +
 +/*
-+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
++ * MSI message composition
 + */
-+static int get_free_idx(void)
++#ifdef CONFIG_PCI_MSI
++static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
 +{
-+	struct thread_struct *t = &current->thread;
-+	int idx;
++	int vector;
++	unsigned dest;
 +
-+	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
-+		if (desc_empty(t->tls_array + idx))
-+			return idx + GDT_ENTRY_TLS_MIN;
-+	return -ESRCH;
++	vector = assign_irq_vector(irq);
++	if (vector >= 0) {
++		dest = cpu_mask_to_apicid(TARGET_CPUS);
++
++		msg->address_hi = MSI_ADDR_BASE_HI;
++		msg->address_lo =
++			MSI_ADDR_BASE_LO |
++			((INT_DEST_MODE == 0) ?
++				MSI_ADDR_DEST_MODE_PHYSICAL:
++				MSI_ADDR_DEST_MODE_LOGICAL) |
++			((INT_DELIVERY_MODE != dest_LowestPrio) ?
++				MSI_ADDR_REDIRECTION_CPU:
++				MSI_ADDR_REDIRECTION_LOWPRI) |
++			MSI_ADDR_DEST_ID(dest);
++
++		msg->data =
++			MSI_DATA_TRIGGER_EDGE |
++			MSI_DATA_LEVEL_ASSERT |
++			((INT_DELIVERY_MODE != dest_LowestPrio) ?
++				MSI_DATA_DELIVERY_FIXED:
++				MSI_DATA_DELIVERY_LOWPRI) |
++			MSI_DATA_VECTOR(vector);
++	}
++	return vector;
 +}
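++
++/*
++ * Worked example (illustrative, not from the original patch): fixed
++ * delivery of vector 0x31 to physical APIC ID 0 composes roughly
++ *	address_hi = 0x00000000
++ *	address_lo = 0xfee00000	(MSI_ADDR_BASE_LO, dest/mode bits clear)
++ *	data       = 0x00004031	(edge, assert, vector 0x31)
++ * i.e. the device raises the interrupt by writing 'data' to the
++ * 0xfeexxxxx address window.
++ */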
 +
-+/*
-+ * Set a given TLS descriptor:
-+ */
-+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
++#ifdef CONFIG_SMP
++static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 +{
-+	struct thread_struct *t = &current->thread;
-+	struct user_desc info;
-+	struct desc_struct *desc;
-+	int cpu, idx;
++	struct msi_msg msg;
++	unsigned int dest;
++	cpumask_t tmp;
++	int vector;
 +
-+	if (copy_from_user(&info, u_info, sizeof(info)))
-+		return -EFAULT;
-+	idx = info.entry_number;
++	cpus_and(tmp, mask, cpu_online_map);
++	if (cpus_empty(tmp))
++		tmp = TARGET_CPUS;
 +
-+	/*
-+	 * index -1 means the kernel should try to find and
-+	 * allocate an empty descriptor:
-+	 */
-+	if (idx == -1) {
-+		idx = get_free_idx();
-+		if (idx < 0)
-+			return idx;
-+		if (put_user(idx, &u_info->entry_number))
-+			return -EFAULT;
-+	}
++	vector = assign_irq_vector(irq);
++	if (vector < 0)
++		return;
 +
-+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+		return -EINVAL;
++	dest = cpu_mask_to_apicid(mask);
 +
-+	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
++	read_msi_msg(irq, &msg);
 +
-+	/*
-+	 * We must not get preempted while modifying the TLS.
-+	 */
-+	cpu = get_cpu();
++	msg.data &= ~MSI_DATA_VECTOR_MASK;
++	msg.data |= MSI_DATA_VECTOR(vector);
++	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
++	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 +
-+	if (LDT_empty(&info)) {
-+		desc->a = 0;
-+		desc->b = 0;
-+	} else {
-+		desc->a = LDT_entry_a(&info);
-+		desc->b = LDT_entry_b(&info);
-+	}
-+	load_TLS(t, cpu);
++	write_msi_msg(irq, &msg);
++	set_native_irq_info(irq, mask);
++}
++#endif /* CONFIG_SMP */
 +
-+	put_cpu();
++/*
++ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
++ * which implement the MSI or MSI-X Capability Structure.
++ */
++static struct irq_chip msi_chip = {
++	.name		= "PCI-MSI",
++	.unmask		= unmask_msi_irq,
++	.mask		= mask_msi_irq,
++	.ack		= ack_ioapic_irq,
++#ifdef CONFIG_SMP
++	.set_affinity	= set_msi_irq_affinity,
++#endif
++	.retrigger	= ioapic_retrigger_irq,
++};
++
++int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
++{
++	struct msi_msg msg;
++	int ret;
++	ret = msi_compose_msg(dev, irq, &msg);
++	if (ret < 0)
++		return ret;
++
++	write_msi_msg(irq, &msg);
++
++	set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
++				      "edge");
 +
 +	return 0;
 +}
 +
++void arch_teardown_msi_irq(unsigned int irq)
++{
++	return;
++}
++
++#endif /* CONFIG_PCI_MSI */
++
 +/*
-+ * Get the current Thread-Local Storage area:
++ * Hypertransport interrupt support
 + */
++#ifdef CONFIG_HT_IRQ
 +
-+#define GET_BASE(desc) ( \
-+	(((desc)->a >> 16) & 0x0000ffff) | \
-+	(((desc)->b << 16) & 0x00ff0000) | \
-+	( (desc)->b        & 0xff000000)   )
-+
-+#define GET_LIMIT(desc) ( \
-+	((desc)->a & 0x0ffff) | \
-+	 ((desc)->b & 0xf0000) )
-+	
-+#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
-+#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
-+#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
-+#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
-+#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
-+#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
++#ifdef CONFIG_SMP
 +
-+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
++static void target_ht_irq(unsigned int irq, unsigned int dest)
 +{
-+	struct user_desc info;
-+	struct desc_struct *desc;
-+	int idx;
++	struct ht_irq_msg msg;
++	fetch_ht_irq_msg(irq, &msg);
 +
-+	if (get_user(idx, &u_info->entry_number))
-+		return -EFAULT;
-+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+		return -EINVAL;
++	msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK);
++	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
 +
-+	memset(&info, 0, sizeof(info));
++	msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest);
++	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
 +
-+	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++	write_ht_irq_msg(irq, &msg);
++}
 +
-+	info.entry_number = idx;
-+	info.base_addr = GET_BASE(desc);
-+	info.limit = GET_LIMIT(desc);
-+	info.seg_32bit = GET_32BIT(desc);
-+	info.contents = GET_CONTENTS(desc);
-+	info.read_exec_only = !GET_WRITABLE(desc);
-+	info.limit_in_pages = GET_LIMIT_PAGES(desc);
-+	info.seg_not_present = !GET_PRESENT(desc);
-+	info.useable = GET_USEABLE(desc);
++static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++	unsigned int dest;
++	cpumask_t tmp;
 +
-+	if (copy_to_user(u_info, &info, sizeof(info)))
-+		return -EFAULT;
-+	return 0;
++	cpus_and(tmp, mask, cpu_online_map);
++	if (cpus_empty(tmp))
++		tmp = TARGET_CPUS;
++
++	cpus_and(mask, tmp, CPU_MASK_ALL);
++
++	dest = cpu_mask_to_apicid(mask);
++
++	target_ht_irq(irq, dest);
++	set_native_irq_info(irq, mask);
 +}
++#endif
 +
-+unsigned long arch_align_stack(unsigned long sp)
++static struct irq_chip ht_irq_chip = {
++	.name		= "PCI-HT",
++	.mask		= mask_ht_irq,
++	.unmask		= unmask_ht_irq,
++	.ack		= ack_ioapic_irq,
++#ifdef CONFIG_SMP
++	.set_affinity	= set_ht_irq_affinity,
++#endif
++	.retrigger	= ioapic_retrigger_irq,
++};
++
++int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 +{
-+	if (randomize_va_space)
-+		sp -= get_random_int() % 8192;
-+	return sp & ~0xf;
++	int vector;
++
++	vector = assign_irq_vector(irq);
++	if (vector >= 0) {
++		struct ht_irq_msg msg;
++		unsigned dest;
++		cpumask_t tmp;
++
++		cpus_clear(tmp);
++		cpu_set(vector >> 8, tmp);
++		dest = cpu_mask_to_apicid(tmp);
++
++		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
++
++		msg.address_lo =
++			HT_IRQ_LOW_BASE |
++			HT_IRQ_LOW_DEST_ID(dest) |
++			HT_IRQ_LOW_VECTOR(vector) |
++			((INT_DEST_MODE == 0) ?
++				HT_IRQ_LOW_DM_PHYSICAL :
++				HT_IRQ_LOW_DM_LOGICAL) |
++			HT_IRQ_LOW_RQEOI_EDGE |
++			((INT_DELIVERY_MODE != dest_LowestPrio) ?
++				HT_IRQ_LOW_MT_FIXED :
++				HT_IRQ_LOW_MT_ARBITRATED) |
++			HT_IRQ_LOW_IRQ_MASKED;
++
++		write_ht_irq_msg(irq, &msg);
++
++		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
++					      handle_edge_irq, "edge");
++	}
++	return vector;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/quirks-xen.c linux-2.6.18-xen/arch/i386/kernel/quirks-xen.c
---- linux-2.6.18.3/arch/i386/kernel/quirks-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/quirks-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,47 @@
-+/*
-+ * This file contains work-arounds for x86 and x86_64 platform bugs.
-+ */
-+#include <linux/pci.h>
-+#include <linux/irq.h>
++#endif /* CONFIG_HT_IRQ */
 +
-+#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
++/* --------------------------------------------------------------------------
++                          ACPI-based IOAPIC Configuration
++   -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
 +
-+static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
++int __init io_apic_get_unique_id (int ioapic, int apic_id)
 +{
-+	u8 config, rev;
-+	u32 word;
++#ifndef CONFIG_XEN
++	union IO_APIC_reg_00 reg_00;
++	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
++	physid_mask_t tmp;
++	unsigned long flags;
++	int i = 0;
 +
-+	/* BIOS may enable hardware IRQ balancing for
-+	 * E7520/E7320/E7525(revision ID 0x9 and below)
-+	 * based platforms.
-+	 * Disable SW irqbalance/affinity on those platforms.
++	/*
++	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
++	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
++	 * support up to 16 on one shared APIC bus.
++	 * 
++	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
++	 *      advantage of new APIC bus architecture.
 +	 */
-+	pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
-+	if (rev > 0x9)
-+		return;
-+
-+	printk(KERN_INFO "Intel E7520/7320/7525 detected.");
 +
-+	/* enable access to config space*/
-+	pci_read_config_byte(dev, 0xf4, &config);
-+	pci_write_config_byte(dev, 0xf4, config|0x2);
++	if (physids_empty(apic_id_map))
++		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
 +
-+	/* read xTPR register */
-+	raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_00.raw = io_apic_read(ioapic, 0);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
 +
-+	if (!(word & (1 << 13))) {
-+		dom0_op_t op;
-+		printk(KERN_INFO "Disabling irq balancing and affinity\n");
-+		op.cmd = DOM0_PLATFORM_QUIRK;
-+		op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
-+		(void)HYPERVISOR_dom0_op(&op);
++	if (apic_id >= get_physical_broadcast()) {
++		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
++			"%d\n", ioapic, apic_id, reg_00.bits.ID);
++		apic_id = reg_00.bits.ID;
 +	}
 +
-+	/* put back the original value for config space*/
-+	if (!(config & 0x2))
-+		pci_write_config_byte(dev, 0xf4, config);
-+}
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7320_MCH,	quirk_intel_irqbalance);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7525_MCH,	quirk_intel_irqbalance);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7520_MCH,	quirk_intel_irqbalance);
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/setup-xen.c linux-2.6.18-xen/arch/i386/kernel/setup-xen.c
---- linux-2.6.18.3/arch/i386/kernel/setup-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/setup-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,1835 @@
-+/*
-+ *  linux/arch/i386/kernel/setup.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *
-+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
-+ *
-+ *  Memory region support
-+ *	David Parsons <orc at pell.chi.il.us>, July-August 1999
-+ *
-+ *  Added E820 sanitization routine (removes overlapping memory regions);
-+ *  Brian Moyle <bmoyle at mvista.com>, February 2001
-+ *
-+ * Moved CPU detection code to cpu/${cpu}.c
-+ *    Patrick Mochel <mochel at osdl.org>, March 2002
-+ *
-+ *  Provisions for empty E820 memory regions (reported by certain BIOSes).
-+ *  Alex Achenbach <xela at slit.de>, December 2002.
-+ *
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of initialization
-+ */
++	/*
++	 * Every APIC in a system must have a unique ID or we get lots of nice 
++	 * 'stuck on smp_invalidate_needed IPI wait' messages.
++	 */
++	if (check_apicid_used(apic_id_map, apic_id)) {
 +
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/mmzone.h>
-+#include <linux/screen_info.h>
-+#include <linux/ioport.h>
-+#include <linux/acpi.h>
-+#include <linux/apm_bios.h>
-+#include <linux/initrd.h>
-+#include <linux/bootmem.h>
-+#include <linux/seq_file.h>
-+#include <linux/platform_device.h>
-+#include <linux/console.h>
-+#include <linux/mca.h>
-+#include <linux/root_dev.h>
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+#include <linux/efi.h>
-+#include <linux/init.h>
-+#include <linux/edd.h>
-+#include <linux/nodemask.h>
-+#include <linux/kernel.h>
-+#include <linux/percpu.h>
-+#include <linux/notifier.h>
-+#include <linux/kexec.h>
-+#include <linux/crash_dump.h>
-+#include <linux/dmi.h>
-+#include <linux/pfn.h>
++		for (i = 0; i < get_physical_broadcast(); i++) {
++			if (!check_apicid_used(apic_id_map, i))
++				break;
++		}
 +
-+#include <video/edid.h>
++		if (i == get_physical_broadcast())
++			panic("Max apic_id exceeded!\n");
 +
-+#include <asm/apic.h>
-+#include <asm/e820.h>
-+#include <asm/mpspec.h>
-+#include <asm/setup.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/sections.h>
-+#include <asm/io_apic.h>
-+#include <asm/ist.h>
-+#include <asm/io.h>
-+#include <asm/hypervisor.h>
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/memory.h>
-+#include <xen/features.h>
-+#include <xen/xencons.h>
-+#include "setup_arch.h"
-+#include <bios_ebda.h>
++		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
++			"trying %d\n", ioapic, apic_id, i);
 +
-+/* Forward Declaration. */
-+void __init find_max_pfn(void);
++		apic_id = i;
++	} 
 +
-+static int xen_panic_event(struct notifier_block *, unsigned long, void *);
-+static struct notifier_block xen_panic_block = {
-+	xen_panic_event, NULL, 0 /* try to go last */
-+};
++	tmp = apicid_to_cpu_present(apic_id);
++	physids_or(apic_id_map, apic_id_map, tmp);
 +
-+extern char hypercall_page[PAGE_SIZE];
-+EXPORT_SYMBOL(hypercall_page);
++	if (reg_00.bits.ID != apic_id) {
++		reg_00.bits.ID = apic_id;
 +
-+int disable_pse __devinitdata = 0;
++		spin_lock_irqsave(&ioapic_lock, flags);
++		io_apic_write(ioapic, 0, reg_00.raw);
++		reg_00.raw = io_apic_read(ioapic, 0);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
 +
-+/*
-+ * Machine setup..
-+ */
++		/* Sanity check */
++		if (reg_00.bits.ID != apic_id) {
++			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
++			return -1;
++		}
++	}
 +
-+#ifdef CONFIG_EFI
-+int efi_enabled = 0;
-+EXPORT_SYMBOL(efi_enabled);
-+#endif
++	apic_printk(APIC_VERBOSE, KERN_INFO
++			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
++#endif /* !CONFIG_XEN */
 +
-+/* cpu data as detected by the assembly code in head.S */
-+struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
-+/* common cpu data for all cpus */
-+struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
-+EXPORT_SYMBOL(boot_cpu_data);
++	return apic_id;
++}
 +
-+unsigned long mmu_cr4_features;
 +
-+#ifdef	CONFIG_ACPI
-+	int acpi_disabled = 0;
-+#else
-+	int acpi_disabled = 1;
-+#endif
-+EXPORT_SYMBOL(acpi_disabled);
++int __init io_apic_get_version (int ioapic)
++{
++	union IO_APIC_reg_01	reg_01;
++	unsigned long flags;
 +
-+#ifdef	CONFIG_ACPI
-+int __initdata acpi_force = 0;
-+extern acpi_interrupt_flags	acpi_sci_flags;
-+#endif
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_01.raw = io_apic_read(ioapic, 1);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
 +
-+/* for MCA, but anyone else can use it if they want */
-+unsigned int machine_id;
-+#ifdef CONFIG_MCA
-+EXPORT_SYMBOL(machine_id);
-+#endif
-+unsigned int machine_submodel_id;
-+unsigned int BIOS_revision;
-+unsigned int mca_pentium_flag;
++	return reg_01.bits.version;
++}
 +
-+/* For PCI or other memory-mapped resources */
-+unsigned long pci_mem_start = 0x10000000;
-+#ifdef CONFIG_PCI
-+EXPORT_SYMBOL(pci_mem_start);
-+#endif
 +
-+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
-+int bootloader_type;
++int __init io_apic_get_redir_entries (int ioapic)
++{
++	union IO_APIC_reg_01	reg_01;
++	unsigned long flags;
 +
-+/* user-defined highmem size */
-+static unsigned int highmem_pages = -1;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_01.raw = io_apic_read(ioapic, 1);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
 +
-+/*
-+ * Setup options
-+ */
-+struct drive_info_struct { char dummy[32]; } drive_info;
-+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
-+    defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-+EXPORT_SYMBOL(drive_info);
-+#endif
-+struct screen_info screen_info;
-+EXPORT_SYMBOL(screen_info);
-+struct apm_info apm_info;
-+EXPORT_SYMBOL(apm_info);
-+struct sys_desc_table_struct {
-+	unsigned short length;
-+	unsigned char table[0];
-+};
-+struct edid_info edid_info;
-+EXPORT_SYMBOL_GPL(edid_info);
-+struct ist_info ist_info;
-+#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
-+	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
-+EXPORT_SYMBOL(ist_info);
-+#endif
-+struct e820map e820;
-+static void __init e820_setup_gap(struct e820entry *e820, int nr_map);
-+#ifdef CONFIG_XEN
-+struct e820map machine_e820;
-+#endif
++	return reg_01.bits.entries;
++}
 +
-+extern void early_cpu_init(void);
-+extern void generic_apic_probe(char *);
-+extern int root_mountflags;
 +
-+unsigned long saved_videomode;
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++{
++	struct IO_APIC_route_entry entry;
++	unsigned long flags;
 +
-+#define RAMDISK_IMAGE_START_MASK  	0x07FF
-+#define RAMDISK_PROMPT_FLAG		0x8000
-+#define RAMDISK_LOAD_FLAG		0x4000	
++	if (!IO_APIC_IRQ(irq)) {
++		printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
++			ioapic);
++		return -EINVAL;
++	}
 +
-+static char command_line[COMMAND_LINE_SIZE];
++	/*
++	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
++	 * Note that we mask (disable) IRQs now -- these get enabled when the
++	 * corresponding device driver registers for this IRQ.
++	 */
 +
-+unsigned char __initdata boot_params[PARAM_SIZE];
++	memset(&entry,0,sizeof(entry));
 +
-+static struct resource data_resource = {
-+	.name	= "Kernel data",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-+};
++	entry.delivery_mode = INT_DELIVERY_MODE;
++	entry.dest_mode = INT_DEST_MODE;
++	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++	entry.trigger = edge_level;
++	entry.polarity = active_high_low;
++	entry.mask  = 1;
 +
-+static struct resource code_resource = {
-+	.name	= "Kernel code",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-+};
++	/*
++	 * IRQs < 16 are already in the irq_2_pin[] map
++	 */
++	if (irq >= 16)
++		add_pin_to_irq(irq, ioapic, pin);
 +
-+static struct resource system_rom_resource = {
-+	.name	= "System ROM",
-+	.start	= 0xf0000,
-+	.end	= 0xfffff,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+};
++	entry.vector = assign_irq_vector(irq);
 +
-+static struct resource extension_rom_resource = {
-+	.name	= "Extension ROM",
-+	.start	= 0xe0000,
-+	.end	= 0xeffff,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+};
-+
-+static struct resource adapter_rom_resources[] = { {
-+	.name 	= "Adapter ROM",
-+	.start	= 0xc8000,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+	.name 	= "Adapter ROM",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+	.name 	= "Adapter ROM",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+	.name 	= "Adapter ROM",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+	.name 	= "Adapter ROM",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+	.name 	= "Adapter ROM",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+} };
++	apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
++		"(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
++		mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
++		edge_level, active_high_low);
 +
-+#define ADAPTER_ROM_RESOURCES \
-+	(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++	ioapic_register_intr(irq, entry.vector, edge_level);
 +
-+static struct resource video_rom_resource = {
-+	.name 	= "Video ROM",
-+	.start	= 0xc0000,
-+	.end	= 0xc7fff,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+};
++	if (!ioapic && (irq < 16))
++		disable_8259A_irq(irq);
 +
-+static struct resource video_ram_resource = {
-+	.name	= "Video RAM area",
-+	.start	= 0xa0000,
-+	.end	= 0xbffff,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+static struct resource standard_io_resources[] = { {
-+	.name	= "dma1",
-+	.start	= 0x0000,
-+	.end	= 0x001f,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "pic1",
-+	.start	= 0x0020,
-+	.end	= 0x0021,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name   = "timer0",
-+	.start	= 0x0040,
-+	.end    = 0x0043,
-+	.flags  = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name   = "timer1",
-+	.start  = 0x0050,
-+	.end    = 0x0053,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "keyboard",
-+	.start	= 0x0060,
-+	.end	= 0x006f,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "dma page reg",
-+	.start	= 0x0080,
-+	.end	= 0x008f,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "pic2",
-+	.start	= 0x00a0,
-+	.end	= 0x00a1,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "dma2",
-+	.start	= 0x00c0,
-+	.end	= 0x00df,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "fpu",
-+	.start	= 0x00f0,
-+	.end	= 0x00ff,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+} };
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__ioapic_write_entry(ioapic, pin, entry);
++	set_native_irq_info(irq, TARGET_CPUS);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
 +
-+#define STANDARD_IO_RESOURCES \
-+	(sizeof standard_io_resources / sizeof standard_io_resources[0])
++	return 0;
++}
 +
-+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++#endif /* CONFIG_ACPI */
 +
-+static int __init romchecksum(unsigned char *rom, unsigned long length)
++static int __init parse_disable_timer_pin_1(char *arg)
 +{
-+	unsigned char *p, sum = 0;
++	disable_timer_pin_1 = 1;
++	return 0;
++}
++early_param("disable_timer_pin_1", parse_disable_timer_pin_1);
 +
-+	for (p = rom; p < rom + length; p++)
-+		sum += *p;
-+	return sum == 0;
++static int __init parse_enable_timer_pin_1(char *arg)
++{
++	disable_timer_pin_1 = -1;
++	return 0;
 +}
++early_param("enable_timer_pin_1", parse_enable_timer_pin_1);
 +
-+static void __init probe_roms(void)
++static int __init parse_noapic(char *arg)
 +{
-+	unsigned long start, length, upper;
-+	unsigned char *rom;
-+	int	      i;
++	/* disable IO-APIC */
++	disable_ioapic_setup();
++	return 0;
++}
++early_param("noapic", parse_noapic);
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/ioport-xen.c b/arch/i386/kernel/ioport-xen.c
+--- a/arch/i386/kernel/ioport-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/ioport-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,121 @@
++/*
++ *	linux/arch/i386/kernel/ioport.c
++ *
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus.
++ */
 +
-+#ifdef CONFIG_XEN
-+	/* Nothing to do if not running in dom0. */
-+	if (!is_initial_xendomain())
-+		return;
-+#endif
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <xen/interface/physdev.h>
 +
-+	/* video rom */
-+	upper = adapter_rom_resources[0].start;
-+	for (start = video_rom_resource.start; start < upper; start += 2048) {
-+		rom = isa_bus_to_virt(start);
-+		if (!romsignature(rom))
-+			continue;
++/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
++static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
++{
++	unsigned long mask;
++	unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
++	unsigned int low_index = base & (BITS_PER_LONG-1);
++	int length = low_index + extent;
 +
-+		video_rom_resource.start = start;
++	if (low_index != 0) {
++		mask = (~0UL << low_index);
++		if (length < BITS_PER_LONG)
++			mask &= ~(~0UL << length);
++		if (new_value)
++			*bitmap_base++ |= mask;
++		else
++			*bitmap_base++ &= ~mask;
++		length -= BITS_PER_LONG;
++	}
 +
-+		/* 0 < length <= 0x7f * 512, historically */
-+		length = rom[2] * 512;
++	mask = (new_value ? ~0UL : 0UL);
++	while (length >= BITS_PER_LONG) {
++		*bitmap_base++ = mask;
++		length -= BITS_PER_LONG;
++	}
 +
-+		/* if checksum okay, trust length byte */
-+		if (length && romchecksum(rom, length))
-+			video_rom_resource.end = start + length - 1;
-+		break;
++	if (length > 0) {
++		mask = ~(~0UL << length);
++		if (new_value)
++			*bitmap_base++ |= mask;
++		else
++			*bitmap_base++ &= ~mask;
 +	}
++}
 +
-+	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
-+	if (start < upper)
-+		start = upper;
 +
-+	/* system rom */
-+	request_resource(&iomem_resource, &system_rom_resource);
-+	upper = system_rom_resource.start;
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++	struct thread_struct * t = &current->thread;
++	unsigned long *bitmap;
++	struct physdev_set_iobitmap set_iobitmap;
 +
-+	/* check for extension rom (ignore length byte!) */
-+	rom = isa_bus_to_virt(extension_rom_resource.start);
-+	if (romsignature(rom)) {
-+		length = extension_rom_resource.end - extension_rom_resource.start + 1;
-+		if (romchecksum(rom, length)) {
-+			request_resource(&iomem_resource, &extension_rom_resource);
-+			upper = extension_rom_resource.start;
-+		}
++	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
++		return -EINVAL;
++	if (turn_on && !capable(CAP_SYS_RAWIO))
++		return -EPERM;
++
++	/*
++	 * If it's the first ioperm() call in this thread's lifetime, set the
++	 * IO bitmap up. ioperm() is much less timing critical than clone(),
++	 * which is why we delay this operation until now:
++	 */
++	if (!t->io_bitmap_ptr) {
++		bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++		if (!bitmap)
++			return -ENOMEM;
++
++		memset(bitmap, 0xff, IO_BITMAP_BYTES);
++		t->io_bitmap_ptr = bitmap;
++
++		set_iobitmap.bitmap   = (char *)bitmap;
++		set_iobitmap.nr_ports = IO_BITMAP_BITS;
++		HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
 +	}
 +
-+	/* check for adapter roms on 2k boundaries */
-+	for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
-+		rom = isa_bus_to_virt(start);
-+		if (!romsignature(rom))
-+			continue;
++	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
 +
-+		/* 0 < length <= 0x7f * 512, historically */
-+		length = rom[2] * 512;
++	return 0;
++}
 +
-+		/* but accept any length that fits if checksum okay */
-+		if (!length || start + length > upper || !romchecksum(rom, length))
-+			continue;
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ * Here we just change the eflags value on the stack: we allow
++ * only the super-user to do it. This depends on the stack-layout
++ * on system-call entry - see also fork() and the signal handling
++ * code.
++ */
 +
-+		adapter_rom_resources[i].start = start;
-+		adapter_rom_resources[i].end = start + length - 1;
-+		request_resource(&iomem_resource, &adapter_rom_resources[i]);
++asmlinkage long sys_iopl(unsigned long unused)
++{
++	volatile struct pt_regs * regs = (struct pt_regs *) &unused;
++	unsigned int level = regs->ebx;
++	struct thread_struct *t = &current->thread;
++	unsigned int old = (t->iopl >> 12) & 3;
 +
-+		start = adapter_rom_resources[i++].end & ~2047UL;
++	if (level > 3)
++		return -EINVAL;
++	/* Trying to gain more privileges? */
++	if (level > old) {
++		if (!capable(CAP_SYS_RAWIO))
++			return -EPERM;
 +	}
++	t->iopl = level << 12;
++	set_iopl_mask(t->iopl);
++	return 0;
 +}
-+
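[Editor's note: sys_ioperm() above backs the glibc ioperm(2) wrapper. Note the
inverted sense: a 0 bit in the I/O bitmap means access is allowed, which is why
the code passes !turn_on to set_bitmap(); under Xen the bitmap is additionally
registered with the hypervisor through PHYSDEVOP_set_iobitmap, but the
user-space view is unchanged. A minimal caller, x86 only, needs
CAP_SYS_RAWIO; port 0x80 is the traditional POST-code debug port and is
generally safe to write.]

#include <stdio.h>
#include <sys/io.h>	/* ioperm(), outb(): glibc, x86 only */

int main(void)
{
	/* Request write access to one port, 0x80, turn_on = 1. */
	if (ioperm(0x80, 1, 1)) {
		perror("ioperm");	/* fails without CAP_SYS_RAWIO */
		return 1;
	}
	outb(0x42, 0x80);	/* harmless write to the POST-code port */
	ioperm(0x80, 1, 0);	/* and drop the permission again */
	return 0;
}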
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
+--- a/arch/i386/kernel/irq.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/irq.c	2007-03-14 10:55:14.000000000 +0100
+@@ -302,7 +302,9 @@
+ 
+ 		cpus_and(mask, irq_desc[irq].affinity, map);
+ 		if (any_online_cpu(mask) == NR_CPUS) {
++#ifndef CONFIG_XEN
+ 			printk("Breaking affinity for irq %i\n", irq);
++#endif
+ 			mask = map;
+ 		}
+ 		if (irq_desc[irq].chip->set_affinity)
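[Editor's note: this hunk only silences the "Breaking affinity" message under
Xen; the surrounding logic (fixup_irqs(), judging by the context, run when a
CPU goes offline) intersects each IRQ's affinity with the map of remaining
CPUs and falls back to the whole map when the intersection is empty. A
simplified model with a plain bitmask standing in for cpumask_t:]

#include <stdio.h>
#include <stdint.h>

/* Keep an IRQ's affinity inside the set of online CPUs. */
static uint64_t fixup_affinity(uint64_t affinity, uint64_t online)
{
	uint64_t mask = affinity & online;	/* cpus_and(mask, affinity, map) */

	if (!mask)		/* any_online_cpu(mask) == NR_CPUS */
		mask = online;	/* "Breaking affinity": take every online CPU */
	return mask;
}

int main(void)
{
	/* IRQ bound to CPU 2 alone; CPU 2 goes away, CPUs 0-1 stay online. */
	uint64_t mask = fixup_affinity(1ULL << 2, 0x3);

	printf("new affinity: %#llx\n", (unsigned long long)mask);	/* 0x3 */
	return 0;
}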
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/ldt-xen.c b/arch/i386/kernel/ldt-xen.c
+--- a/arch/i386/kernel/ldt-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/ldt-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,268 @@
 +/*
-+ * Point at the empty zero page to start with. We map the real shared_info
-+ * page as soon as fixmap is up and running.
++ * linux/arch/i386/kernel/ldt.c
++ *
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
 + */
-+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+EXPORT_SYMBOL(HYPERVISOR_shared_info);
 +
-+unsigned long *phys_to_machine_mapping;
-+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
-+EXPORT_SYMBOL(phys_to_machine_mapping);
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
 +
-+/* Raw start-of-day parameters from the hypervisor. */
-+start_info_t *xen_start_info;
-+EXPORT_SYMBOL(xen_start_info);
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/mmu_context.h>
 +
-+void __init add_memory_region(unsigned long long start,
-+                                  unsigned long long size, int type)
++#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
++static void flush_ldt(void *null)
 +{
-+	int x;
-+
-+	if (!efi_enabled) {
-+       		x = e820.nr_map;
-+
-+		if (x == E820MAX) {
-+		    printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
-+		    return;
-+		}
-+
-+		e820.map[x].addr = start;
-+		e820.map[x].size = size;
-+		e820.map[x].type = type;
-+		e820.nr_map++;
-+	}
-+} /* add_memory_region */
++	if (current->active_mm)
++		load_LDT(&current->active_mm->context);
++}
++#endif
 +
-+static void __init limit_regions(unsigned long long size)
++static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 +{
-+	unsigned long long current_addr = 0;
-+	int i;
++	void *oldldt;
++	void *newldt;
++	int oldsize;
 +
-+	if (efi_enabled) {
-+		efi_memory_desc_t *md;
-+		void *p;
++	if (mincount <= pc->size)
++		return 0;
++	oldsize = pc->size;
++	mincount = (mincount+511)&(~511);
++	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
++		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
++	else
++		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
 +
-+		for (p = memmap.map, i = 0; p < memmap.map_end;
-+			p += memmap.desc_size, i++) {
-+			md = p;
-+			current_addr = md->phys_addr + (md->num_pages << 12);
-+			if (md->type == EFI_CONVENTIONAL_MEMORY) {
-+				if (current_addr >= size) {
-+					md->num_pages -=
-+						(((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
-+					memmap.nr_map = i + 1;
-+					return;
-+				}
-+			}
-+		}
-+	}
-+	for (i = 0; i < e820.nr_map; i++) {
-+		current_addr = e820.map[i].addr + e820.map[i].size;
-+		if (current_addr < size)
-+			continue;
++	if (!newldt)
++		return -ENOMEM;
 +
-+		if (e820.map[i].type != E820_RAM)
-+			continue;
++	if (oldsize)
++		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
++	oldldt = pc->ldt;
++	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
++	pc->ldt = newldt;
++	wmb();
++	pc->size = mincount;
++	wmb();
 +
-+		if (e820.map[i].addr >= size) {
-+			/*
-+			 * This region starts past the end of the
-+			 * requested size, skip it completely.
-+			 */
-+			e820.nr_map = i;
-+		} else {
-+			e820.nr_map = i + 1;
-+			e820.map[i].size -= current_addr - size;
-+		}
-+		return;
++	if (reload) {
++#ifdef CONFIG_SMP
++		cpumask_t mask;
++		preempt_disable();
++#endif
++		make_pages_readonly(
++			pc->ldt,
++			(pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++			XENFEAT_writable_descriptor_tables);
++		load_LDT(pc);
++#ifdef CONFIG_SMP
++		mask = cpumask_of_cpu(smp_processor_id());
++		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++			smp_call_function(flush_ldt, NULL, 1, 1);
++		preempt_enable();
++#endif
 +	}
-+#ifdef CONFIG_XEN
-+	if (i==e820.nr_map && current_addr < size) {
-+		/*
-+                 * The e820 map finished before our requested size so
-+                 * extend the final entry to the requested address.
-+                 */
-+		--i;
-+		if (e820.map[i].type == E820_RAM)
-+			e820.map[i].size -= current_addr - size;
++	if (oldsize) {
++		make_pages_writable(
++			oldldt,
++			(oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
++			XENFEAT_writable_descriptor_tables);
++		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
++			vfree(oldldt);
 +		else
-+			add_memory_region(current_addr, size - current_addr, E820_RAM);
++			kfree(oldldt);
 +	}
-+#endif
++	return 0;
 +}
 +
-+#define E820_DEBUG	1
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++{
++	int err = alloc_ldt(new, old->size, 0);
++	if (err < 0)
++		return err;
++	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
++	make_pages_readonly(
++		new->ldt,
++		(new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++		XENFEAT_writable_descriptor_tables);
++	return 0;
++}
 +
-+static void __init print_memory_map(char *who)
++/*
++ * we do not have to muck with descriptors here, that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 +{
-+	int i;
++	struct mm_struct * old_mm;
++	int retval = 0;
 +
-+	for (i = 0; i < e820.nr_map; i++) {
-+		printk(" %s: %016Lx - %016Lx ", who,
-+			e820.map[i].addr,
-+			e820.map[i].addr + e820.map[i].size);
-+		switch (e820.map[i].type) {
-+		case E820_RAM:	printk("(usable)\n");
-+				break;
-+		case E820_RESERVED:
-+				printk("(reserved)\n");
-+				break;
-+		case E820_ACPI:
-+				printk("(ACPI data)\n");
-+				break;
-+		case E820_NVS:
-+				printk("(ACPI NVS)\n");
-+				break;
-+		default:	printk("type %lu\n", e820.map[i].type);
-+				break;
-+		}
++	init_MUTEX(&mm->context.sem);
++	mm->context.size = 0;
++	mm->context.has_foreign_mappings = 0;
++	old_mm = current->mm;
++	if (old_mm && old_mm->context.size > 0) {
++		down(&old_mm->context.sem);
++		retval = copy_ldt(&mm->context, &old_mm->context);
++		up(&old_mm->context.sem);
 +	}
++	return retval;
 +}
 +
 +/*
-+ * Sanitize the BIOS e820 map.
-+ *
-+ * Some e820 responses include overlapping entries.  The following 
-+ * replaces the original e820 map with a new one, removing overlaps.
-+ *
++ * No need to lock the MM as we are the last user
 + */
-+struct change_member {
-+	struct e820entry *pbios; /* pointer to original bios entry */
-+	unsigned long long addr; /* address for this change point */
-+};
-+static struct change_member change_point_list[2*E820MAX] __initdata;
-+static struct change_member *change_point[2*E820MAX] __initdata;
-+static struct e820entry *overlap_list[E820MAX] __initdata;
-+static struct e820entry new_bios[E820MAX] __initdata;
++void destroy_context(struct mm_struct *mm)
++{
++	if (mm->context.size) {
++		if (mm == current->active_mm)
++			clear_LDT();
++		make_pages_writable(
++			mm->context.ldt,
++			(mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++			XENFEAT_writable_descriptor_tables);
++		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
++			vfree(mm->context.ldt);
++		else
++			kfree(mm->context.ldt);
++		mm->context.size = 0;
++	}
++}
 +
-+int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++static int read_ldt(void __user * ptr, unsigned long bytecount)
 +{
-+	struct change_member *change_tmp;
-+	unsigned long current_type, last_type;
-+	unsigned long long last_addr;
-+	int chgidx, still_changing;
-+	int overlap_entries;
-+	int new_bios_entry;
-+	int old_nr, new_nr, chg_nr;
-+	int i;
++	int err;
++	unsigned long size;
++	struct mm_struct * mm = current->mm;
 +
-+	/*
-+		Visually we're performing the following (1,2,3,4 = memory types)...
++	if (!mm->context.size)
++		return 0;
++	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
++		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
 +
-+		Sample memory map (w/overlaps):
-+		   ____22__________________
-+		   ______________________4_
-+		   ____1111________________
-+		   _44_____________________
-+		   11111111________________
-+		   ____________________33__
-+		   ___________44___________
-+		   __________33333_________
-+		   ______________22________
-+		   ___________________2222_
-+		   _________111111111______
-+		   _____________________11_
-+		   _________________4______
++	down(&mm->context.sem);
++	size = mm->context.size*LDT_ENTRY_SIZE;
++	if (size > bytecount)
++		size = bytecount;
 +
-+		Sanitized equivalent (no overlap):
-+		   1_______________________
-+		   _44_____________________
-+		   ___1____________________
-+		   ____22__________________
-+		   ______11________________
-+		   _________1______________
-+		   __________3_____________
-+		   ___________44___________
-+		   _____________33_________
-+		   _______________2________
-+		   ________________1_______
-+		   _________________4______
-+		   ___________________2____
-+		   ____________________33__
-+		   ______________________4_
-+	*/
++	err = 0;
++	if (copy_to_user(ptr, mm->context.ldt, size))
++		err = -EFAULT;
++	up(&mm->context.sem);
++	if (err < 0)
++		goto error_return;
++	if (size != bytecount) {
++		/* zero-fill the rest */
++		if (clear_user(ptr+size, bytecount-size) != 0) {
++			err = -EFAULT;
++			goto error_return;
++		}
++	}
++	return bytecount;
++error_return:
++	return err;
++}
 +
-+	/* if there's only one memory region, don't bother */
-+	if (*pnr_map < 2)
-+		return -1;
++static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++{
++	int err;
++	unsigned long size;
 +
-+	old_nr = *pnr_map;
++	err = 0;
++	size = 5*sizeof(struct desc_struct);
++	if (size > bytecount)
++		size = bytecount;
 +
-+	/* bail out if we find any unreasonable addresses in bios map */
-+	for (i=0; i<old_nr; i++)
-+		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
-+			return -1;
++	err = size;
++	if (clear_user(ptr, size))
++		err = -EFAULT;
 +
-+	/* create pointers for initial change-point information (for sorting) */
-+	for (i=0; i < 2*old_nr; i++)
-+		change_point[i] = &change_point_list[i];
++	return err;
++}
 +
-+	/* record all known change-points (starting and ending addresses),
-+	   omitting those that are for empty memory regions */
-+	chgidx = 0;
-+	for (i=0; i < old_nr; i++)	{
-+		if (biosmap[i].size != 0) {
-+			change_point[chgidx]->addr = biosmap[i].addr;
-+			change_point[chgidx++]->pbios = &biosmap[i];
-+			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
-+			change_point[chgidx++]->pbios = &biosmap[i];
-+		}
++static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++{
++	struct mm_struct * mm = current->mm;
++	__u32 entry_1, entry_2;
++	int error;
++	struct user_desc ldt_info;
++
++	error = -EINVAL;
++	if (bytecount != sizeof(ldt_info))
++		goto out;
++	error = -EFAULT; 	
++	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
++		goto out;
++
++	error = -EINVAL;
++	if (ldt_info.entry_number >= LDT_ENTRIES)
++		goto out;
++	if (ldt_info.contents == 3) {
++		if (oldmode)
++			goto out;
++		if (ldt_info.seg_not_present == 0)
++			goto out;
 +	}
-+	chg_nr = chgidx;    	/* true number of change-points */
 +
-+	/* sort change-point list by memory addresses (low -> high) */
-+	still_changing = 1;
-+	while (still_changing)	{
-+		still_changing = 0;
-+		for (i=1; i < chg_nr; i++)  {
-+			/* if <current_addr> > <last_addr>, swap */
-+			/* or, if current=<start_addr> & last=<end_addr>, swap */
-+			if ((change_point[i]->addr < change_point[i-1]->addr) ||
-+				((change_point[i]->addr == change_point[i-1]->addr) &&
-+				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
-+				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
-+			   )
-+			{
-+				change_tmp = change_point[i];
-+				change_point[i] = change_point[i-1];
-+				change_point[i-1] = change_tmp;
-+				still_changing=1;
-+			}
-+		}
++	down(&mm->context.sem);
++	if (ldt_info.entry_number >= mm->context.size) {
++		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
++		if (error < 0)
++			goto out_unlock;
 +	}
 +
-+	/* create a new bios memory map, removing overlaps */
-+	overlap_entries=0;	 /* number of entries in the overlap table */
-+	new_bios_entry=0;	 /* index for creating new bios map entries */
-+	last_type = 0;		 /* start with undefined memory type */
-+	last_addr = 0;		 /* start with 0 as last starting address */
-+	/* loop through change-points, determining affect on the new bios map */
-+	for (chgidx=0; chgidx < chg_nr; chgidx++)
-+	{
-+		/* keep track of all overlapping bios entries */
-+		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
-+		{
-+			/* add map entry to overlap list (> 1 entry implies an overlap) */
-+			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
-+		}
-+		else
-+		{
-+			/* remove entry from list (order independent, so swap with last) */
-+			for (i=0; i<overlap_entries; i++)
-+			{
-+				if (overlap_list[i] == change_point[chgidx]->pbios)
-+					overlap_list[i] = overlap_list[overlap_entries-1];
-+			}
-+			overlap_entries--;
-+		}
-+		/* if there are overlapping entries, decide which "type" to use */
-+		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
-+		current_type = 0;
-+		for (i=0; i<overlap_entries; i++)
-+			if (overlap_list[i]->type > current_type)
-+				current_type = overlap_list[i]->type;
-+		/* continue building up new bios map based on this information */
-+		if (current_type != last_type)	{
-+			if (last_type != 0)	 {
-+				new_bios[new_bios_entry].size =
-+					change_point[chgidx]->addr - last_addr;
-+				/* move forward only if the new size was non-zero */
-+				if (new_bios[new_bios_entry].size != 0)
-+					if (++new_bios_entry >= E820MAX)
-+						break; 	/* no more space left for new bios entries */
-+			}
-+			if (current_type != 0)	{
-+				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
-+				new_bios[new_bios_entry].type = current_type;
-+				last_addr=change_point[chgidx]->addr;
-+			}
-+			last_type = current_type;
++   	/* Allow LDTs to be cleared by the user. */
++   	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++		if (oldmode || LDT_empty(&ldt_info)) {
++			entry_1 = 0;
++			entry_2 = 0;
++			goto install;
 +		}
 +	}
-+	new_nr = new_bios_entry;   /* retain count for new bios entries */
 +
-+	/* copy new bios mapping into original location */
-+	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
-+	*pnr_map = new_nr;
++	entry_1 = LDT_entry_a(&ldt_info);
++	entry_2 = LDT_entry_b(&ldt_info);
++	if (oldmode)
++		entry_2 &= ~(1 << 20);
 +
-+	return 0;
++	/* Install the new entry ...  */
++install:
++	error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number,
++				entry_1, entry_2);
++
++out_unlock:
++	up(&mm->context.sem);
++out:
++	return error;
 +}
 +
-+/*
-+ * Copy the BIOS e820 map into a safe place.
-+ *
-+ * Sanity-check it while we're at it..
-+ *
-+ * If we're lucky and live on a modern system, the setup code
-+ * will have given us a memory map that we can use to properly
-+ * set up memory.  If we aren't, we'll fake a memory map.
-+ *
-+ * We check to see that the memory map contains at least 2 elements
-+ * before we'll use it, because the detection code in setup.S may
-+ * not be perfect and most every PC known to man has two memory
-+ * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
-+ * thinkpad 560x, for example, does not cooperate with the memory
-+ * detection code.)
-+ */
-+int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
 +{
-+#ifndef CONFIG_XEN
-+	/* Only one memory region (or negative)? Ignore it */
-+	if (nr_map < 2)
-+		return -1;
-+#else
-+	BUG_ON(nr_map < 1);
-+#endif
++	int ret = -ENOSYS;
 +
-+	do {
-+		unsigned long long start = biosmap->addr;
-+		unsigned long long size = biosmap->size;
-+		unsigned long long end = start + size;
-+		unsigned long type = biosmap->type;
++	switch (func) {
++	case 0:
++		ret = read_ldt(ptr, bytecount);
++		break;
++	case 1:
++		ret = write_ldt(ptr, bytecount, 1);
++		break;
++	case 2:
++		ret = read_default_ldt(ptr, bytecount);
++		break;
++	case 0x11:
++		ret = write_ldt(ptr, bytecount, 0);
++		break;
++	}
++	return ret;
++}
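[Editor's note: write_ldt() above services func 1 (legacy format) and 0x11
(current format) of sys_modify_ldt(). The Xen-specific part is that the LDT
pages are made read-only to the guest (unless XENFEAT_writable_descriptor_tables
is set) and entries are installed through write_ldt_entry(), while the syscall
ABI stays identical. A minimal x86 caller installing one flat 32-bit data
segment, via raw syscall since glibc ships no wrapper:]

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>	/* struct user_desc */

int main(void)
{
	struct user_desc ud;

	memset(&ud, 0, sizeof(ud));
	ud.entry_number   = 0;		/* first LDT slot */
	ud.base_addr      = 0;
	ud.limit          = 0xfffff;
	ud.seg_32bit      = 1;
	ud.limit_in_pages = 1;		/* limit in 4k pages: a flat 4GB segment */
	ud.contents       = 0;		/* plain data segment */

	/* func 0x11: install an LDT entry in the current format */
	if (syscall(SYS_modify_ldt, 0x11, &ud, sizeof(ud)) < 0) {
		perror("modify_ldt");
		return 1;
	}
	printf("installed LDT entry %u\n", ud.entry_number);
	return 0;
}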
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
+--- a/arch/i386/kernel/Makefile	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/Makefile	2007-03-14 10:55:14.000000000 +0100
+@@ -47,6 +47,12 @@
+ 
+ obj-$(CONFIG_SCx200)		+= scx200.o
+ 
++ifdef CONFIG_XEN
++vsyscall_note := vsyscall-note-xen.o
++else
++vsyscall_note := vsyscall-note.o
++endif
 +
-+		/* Overflow in 64 bits? Ignore the memory map. */
-+		if (start > end)
-+			return -1;
+ # vsyscall.o contains the vsyscall DSO images as __initdata.
+ # We must build both images before we can assemble it.
+ # Note: kbuild does not track this dependency due to usage of .incbin
+@@ -68,7 +74,7 @@
+ 
+ $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
+ $(obj)/vsyscall-%.so: $(src)/vsyscall.lds \
+-		      $(obj)/vsyscall-%.o $(obj)/vsyscall-note.o FORCE
++		      $(obj)/vsyscall-%.o $(obj)/$(vsyscall_note) FORCE
+ 	$(call if_changed,syscall)
+ 
+ # We also create a special relocatable object that should mirror the symbol
+@@ -80,9 +86,20 @@
+ 
+ SYSCFLAGS_vsyscall-syms.o = -r
+ $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
+-			$(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
++			$(obj)/vsyscall-sysenter.o $(obj)/$(vsyscall_note) FORCE
+ 	$(call if_changed,syscall)
+ 
+ k8-y                      += ../../x86_64/kernel/k8.o
+ stacktrace-y		  += ../../x86_64/kernel/stacktrace.o
+ 
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
 +
-+#ifndef CONFIG_XEN
-+		/*
-+		 * Some BIOSes claim RAM in the 640k - 1M region.
-+		 * Not right. Fix it up.
-+		 */
-+		if (type == E820_RAM) {
-+			if (start < 0x100000ULL && end > 0xA0000ULL) {
-+				if (start < 0xA0000ULL)
-+					add_memory_region(start, 0xA0000ULL-start, type);
-+				if (end <= 0x100000ULL)
-+					continue;
-+				start = 0x100000ULL;
-+				size = end - start;
-+			}
-+		}
-+#endif
-+		add_memory_region(start, size, type);
-+	} while (biosmap++,--nr_map);
-+	return 0;
-+}
++obj-y += fixup.o
++microcode-$(subst m,y,$(CONFIG_MICROCODE)) := microcode-xen.o
++n-obj-xen := i8259.o timers/ reboot.o smpboot.o trampoline.o
 +
-+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-+struct edd edd;
-+#ifdef CONFIG_EDD_MODULE
-+EXPORT_SYMBOL(edd);
-+#endif
-+/**
-+ * copy_edd() - Copy the BIOS EDD information
-+ *              from boot_params into a safe place.
++obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
++obj-y := $(call cherrypickxen, $(obj-y))
++extra-y := $(call cherrypickxen, $(extra-y))
++endif
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/microcode-xen.c b/arch/i386/kernel/microcode-xen.c
+--- a/arch/i386/kernel/microcode-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/microcode-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,147 @@
++/*
++ *	Intel CPU Microcode Update Driver for Linux
++ *
++ *	Copyright (C) 2000-2004 Tigran Aivazian
++ *
++ *	This driver allows upgrading microcode on Intel processors
++ *	belonging to the IA-32 family - PentiumPro, Pentium II,
++ *	Pentium III, Xeon, Pentium 4, etc.
++ *
++ *	Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual, 
++ *	Order Number 245472 or free download from:
++ *		
++ *	http://developer.intel.com/design/pentium4/manuals/245472.htm
++ *
++ *	For more information, go to http://www.urbanmyth.org/microcode
 + *
++ *	This program is free software; you can redistribute it and/or
++ *	modify it under the terms of the GNU General Public License
++ *	as published by the Free Software Foundation; either version
++ *	2 of the License, or (at your option) any later version.
 + */
-+static inline void copy_edd(void)
-+{
-+     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
-+     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
-+     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
-+     edd.edd_info_nr = EDD_NR;
-+}
-+#else
-+static inline void copy_edd(void)
-+{
-+}
-+#endif
 +
-+static void __init parse_cmdline_early (char ** cmdline_p)
-+{
-+	char c = ' ', *to = command_line, *from = saved_command_line;
-+	int len = 0, max_cmdline;
-+	int userdef = 0;
-+
-+	if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
-+		max_cmdline = COMMAND_LINE_SIZE;
-+	memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
-+	/* Save unparsed command line copy for /proc/cmdline */
-+	saved_command_line[max_cmdline-1] = '\0';
++//#define DEBUG /* pr_debug */
++#include <linux/capability.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/cpumask.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/miscdevice.h>
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <linux/syscalls.h>
 +
-+	for (;;) {
-+		if (c != ' ')
-+			goto next_char;
-+		/*
-+		 * "mem=nopentium" disables the 4MB page tables.
-+		 * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
-+		 * to <mem>, overriding the bios size.
-+		 * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
-+		 * <start> to <start>+<mem>, overriding the bios size.
-+		 *
-+		 * HPA tells me bootloaders need to parse mem=, so no new
-+		 * option should be mem=  [also see Documentation/i386/boot.txt]
-+		 */
-+		if (!memcmp(from, "mem=", 4)) {
-+			if (to != command_line)
-+				to--;
-+			if (!memcmp(from+4, "nopentium", 9)) {
-+				from += 9+4;
-+				clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-+				disable_pse = 1;
-+			} else {
-+				/* If the user specifies memory size, we
-+				 * limit the BIOS-provided memory map to
-+				 * that size. exactmap can be used to specify
-+				 * the exact map. mem=number can be used to
-+				 * trim the existing memory map.
-+				 */
-+				unsigned long long mem_size;
-+ 
-+				mem_size = memparse(from+4, &from);
-+				limit_regions(mem_size);
-+				userdef=1;
-+			}
-+		}
++#include <asm/msr.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
 +
-+		else if (!memcmp(from, "memmap=", 7)) {
-+			if (to != command_line)
-+				to--;
-+			if (!memcmp(from+7, "exactmap", 8)) {
-+#ifdef CONFIG_CRASH_DUMP
-+				/* If we are doing a crash dump, we
-+				 * still need to know the real mem
-+				 * size before original memory map is
-+				 * reset.
-+				 */
-+				find_max_pfn();
-+				saved_max_pfn = max_pfn;
-+#endif
-+				from += 8+7;
-+				e820.nr_map = 0;
-+				userdef = 1;
-+			} else {
-+				/* If the user specifies memory size, we
-+				 * limit the BIOS-provided memory map to
-+				 * that size. exactmap can be used to specify
-+				 * the exact map. mem=number can be used to
-+				 * trim the existing memory map.
-+				 */
-+				unsigned long long start_at, mem_size;
-+ 
-+				mem_size = memparse(from+7, &from);
-+				if (*from == '@') {
-+					start_at = memparse(from+1, &from);
-+					add_memory_region(start_at, mem_size, E820_RAM);
-+				} else if (*from == '#') {
-+					start_at = memparse(from+1, &from);
-+					add_memory_region(start_at, mem_size, E820_ACPI);
-+				} else if (*from == '$') {
-+					start_at = memparse(from+1, &from);
-+					add_memory_region(start_at, mem_size, E820_RESERVED);
-+				} else {
-+					limit_regions(mem_size);
-+					userdef=1;
-+				}
-+			}
-+		}
++MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
++MODULE_AUTHOR("Tigran Aivazian <tigran at veritas.com>");
++MODULE_LICENSE("GPL");
 +
-+		else if (!memcmp(from, "noexec=", 7))
-+			noexec_setup(from + 7);
++#define MICROCODE_VERSION 	"1.14-xen"
 +
++#define DEFAULT_UCODE_DATASIZE 	(2000) 	  /* 2000 bytes */
++#define MC_HEADER_SIZE		(sizeof (microcode_header_t))  	  /* 48 bytes */
++#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
 +
-+#ifdef  CONFIG_X86_MPPARSE
-+		/*
-+		 * If the BIOS enumerates physical processors before logical,
-+		 * maxcpus=N at enumeration-time can be used to disable HT.
-+		 */
-+		else if (!memcmp(from, "maxcpus=", 8)) {
-+			extern unsigned int maxcpus;
++/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
++static DEFINE_MUTEX(microcode_mutex);
 +
-+			maxcpus = simple_strtoul(from + 8, NULL, 0);
-+		}
-+#endif
++static void __user *user_buffer;	/* user area microcode data buffer */
++static unsigned int user_buffer_size;	/* its size */
++				
++static int microcode_open (struct inode *unused1, struct file *unused2)
++{
++	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
 +
-+#ifdef CONFIG_ACPI
-+		/* "acpi=off" disables both ACPI table parsing and interpreter */
-+		else if (!memcmp(from, "acpi=off", 8)) {
-+			disable_acpi();
-+		}
 +
-+		/* acpi=force to over-ride black-list */
-+		else if (!memcmp(from, "acpi=force", 10)) {
-+			acpi_force = 1;
-+			acpi_ht = 1;
-+			acpi_disabled = 0;
-+		}
++static int do_microcode_update (void)
++{
++	int err;
++	dom0_op_t op;
 +
-+		/* acpi=strict disables out-of-spec workarounds */
-+		else if (!memcmp(from, "acpi=strict", 11)) {
-+			acpi_strict = 1;
-+		}
++	err = sys_mlock((unsigned long)user_buffer, user_buffer_size);
++	if (err != 0)
++		return err;
 +
-+		/* Limit ACPI just to boot-time to enable HT */
-+		else if (!memcmp(from, "acpi=ht", 7)) {
-+			if (!acpi_force)
-+				disable_acpi();
-+			acpi_ht = 1;
-+		}
-+		
-+		/* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
-+		else if (!memcmp(from, "pci=noacpi", 10)) {
-+			acpi_disable_pci();
-+		}
-+		/* "acpi=noirq" disables ACPI interrupt routing */
-+		else if (!memcmp(from, "acpi=noirq", 10)) {
-+			acpi_noirq_set();
-+		}
++	op.cmd = DOM0_MICROCODE;
++	set_xen_guest_handle(op.u.microcode.data, user_buffer);
++	op.u.microcode.length = user_buffer_size;
++	err = HYPERVISOR_dom0_op(&op);
 +
-+		else if (!memcmp(from, "acpi_sci=edge", 13))
-+			acpi_sci_flags.trigger =  1;
++	(void)sys_munlock((unsigned long)user_buffer, user_buffer_size);
 +
-+		else if (!memcmp(from, "acpi_sci=level", 14))
-+			acpi_sci_flags.trigger = 3;
++	return err;
++}
 +
-+		else if (!memcmp(from, "acpi_sci=high", 13))
-+			acpi_sci_flags.polarity = 1;
++static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
++{
++	ssize_t ret;
 +
-+		else if (!memcmp(from, "acpi_sci=low", 12))
-+			acpi_sci_flags.polarity = 3;
++	if (len < DEFAULT_UCODE_TOTALSIZE) {
++		printk(KERN_ERR "microcode: not enough data\n"); 
++		return -EINVAL;
++	}
 +
-+#ifdef CONFIG_X86_IO_APIC
-+		else if (!memcmp(from, "acpi_skip_timer_override", 24))
-+			acpi_skip_timer_override = 1;
++	if ((len >> PAGE_SHIFT) > num_physpages) {
++		printk(KERN_ERR "microcode: too much data (max %ld pages)\n", num_physpages);
++		return -EINVAL;
++	}
 +
-+		if (!memcmp(from, "disable_timer_pin_1", 19))
-+			disable_timer_pin_1 = 1;
-+		if (!memcmp(from, "enable_timer_pin_1", 18))
-+			disable_timer_pin_1 = -1;
-+
-+		/* disable IO-APIC */
-+		else if (!memcmp(from, "noapic", 6))
-+			disable_ioapic_setup();
-+#endif /* CONFIG_X86_IO_APIC */
-+#endif /* CONFIG_ACPI */
++	mutex_lock(&microcode_mutex);
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+		/* enable local APIC */
-+		else if (!memcmp(from, "lapic", 5))
-+			lapic_enable();
-+
-+		/* disable local APIC */
-+		else if (!memcmp(from, "nolapic", 6))
-+			lapic_disable();
-+#endif /* CONFIG_X86_LOCAL_APIC */
++	user_buffer = (void __user *) buf;
++	user_buffer_size = (int) len;
 +
-+#ifdef CONFIG_KEXEC
-+		/* crashkernel=size@addr specifies the location to reserve for
-+		 * a crash kernel.  By reserving this memory we guarantee
-+		 * that linux never sets it up as a DMA target.
-+		 * Useful for holding code to do something appropriate
-+		 * after a kernel panic.
-+		 */
-+		else if (!memcmp(from, "crashkernel=", 12)) {
-+			unsigned long size, base;
-+			size = memparse(from+12, &from);
-+			if (*from == '@') {
-+				base = memparse(from+1, &from);
-+				/* FIXME: Do I want a sanity check
-+				 * to validate the memory range?
-+				 */
-+				crashk_res.start = base;
-+				crashk_res.end   = base + size - 1;
-+			}
-+		}
-+#endif
-+#ifdef CONFIG_PROC_VMCORE
-+		/* elfcorehdr= specifies the location of elf core header
-+		 * stored by the crashed kernel.
-+		 */
-+		else if (!memcmp(from, "elfcorehdr=", 11))
-+			elfcorehdr_addr = memparse(from+11, &from);
-+#endif
++	ret = do_microcode_update();
++	if (!ret)
++		ret = (ssize_t)len;
 +
-+		/*
-+		 * highmem=size forces highmem to be exactly 'size' bytes.
-+		 * This works even on boxes that have no highmem otherwise.
-+		 * This also works to reduce highmem size on bigger boxes.
-+		 */
-+		else if (!memcmp(from, "highmem=", 8))
-+			highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
-+	
-+		/*
-+		 * vmalloc=size forces the vmalloc area to be exactly 'size'
-+		 * bytes. This can be used to increase (or decrease) the
-+		 * vmalloc area - the default is 128m.
-+		 */
-+		else if (!memcmp(from, "vmalloc=", 8))
-+			__VMALLOC_RESERVE = memparse(from+8, &from);
++	mutex_unlock(&microcode_mutex);
 +
-+	next_char:
-+		c = *(from++);
-+		if (!c)
-+			break;
-+		if (COMMAND_LINE_SIZE <= ++len)
-+			break;
-+		*(to++) = c;
-+	}
-+	*to = '\0';
-+	*cmdline_p = command_line;
-+	if (userdef) {
-+		printk(KERN_INFO "user-defined physical RAM map:\n");
-+		print_memory_map("user");
-+	}
++	return ret;
 +}
 +
-+/*
-+ * Callback for efi_memory_walk.
-+ */
-+static int __init
-+efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
++static struct file_operations microcode_fops = {
++	.owner		= THIS_MODULE,
++	.write		= microcode_write,
++	.open		= microcode_open,
++};
++
++static struct miscdevice microcode_dev = {
++	.minor		= MICROCODE_MINOR,
++	.name		= "microcode",
++	.fops		= &microcode_fops,
++};
++
++static int __init microcode_init (void)
 +{
-+	unsigned long *max_pfn = arg, pfn;
++	int error;
 +
-+	if (start < end) {
-+		pfn = PFN_UP(end -1);
-+		if (pfn > *max_pfn)
-+			*max_pfn = pfn;
++	error = misc_register(&microcode_dev);
++	if (error) {
++		printk(KERN_ERR
++			"microcode: can't misc_register on minor=%d\n",
++			MICROCODE_MINOR);
++		return error;
 +	}
-+	return 0;
-+}
 +
-+static int __init
-+efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
-+{
-+	memory_present(0, start, end);
++	printk(KERN_INFO 
++		"IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran at veritas.com>\n");
 +	return 0;
 +}
 +
-+ /*
-+  * This function checks if the entire range <start,end> is mapped with type.
-+  *
-+  * Note: this function only works correct if the e820 table is sorted and
-+  * not-overlapping, which is the case
-+  */
-+int __init
-+e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
++static void __exit microcode_exit (void)
 +{
-+	u64 start = s;
-+	u64 end = e;
-+	int i;
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct e820entry *ei = &e820.map[i];
-+		if (type && ei->type != type)
-+			continue;
-+		/* is the region (part) in overlap with the current region ?*/
-+		if (ei->addr >= end || ei->addr + ei->size <= start)
-+			continue;
-+		/* if the region is at the beginning of <start,end> we move
-+		 * start to the end of the region since it's ok until there
-+		 */
-+		if (ei->addr <= start)
-+			start = ei->addr + ei->size;
-+		/* if start is now at or beyond end, we're done, full
-+		 * coverage */
-+		if (start >= end)
-+			return 1; /* we're done */
-+	}
-+	return 0;
++	misc_deregister(&microcode_dev);
 +}
 +
++module_init(microcode_init)
++module_exit(microcode_exit)
++MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
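[Editor's note: compared with the native driver, do_microcode_update() above
hands the (mlocked) user buffer to the hypervisor via DOM0_MICROCODE instead of
programming MSRs itself; user space still just writes the raw microcode image
to the misc device in one go. A sketch of such a loader, error handling
trimmed; the driver rejects images smaller than one update block:]

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;
	char *buf;
	int in, dev;

	if (argc != 2 || stat(argv[1], &st) < 0) {
		fprintf(stderr, "usage: %s <microcode image>\n", argv[0]);
		return 1;
	}
	buf = malloc(st.st_size);
	in = open(argv[1], O_RDONLY);
	if (!buf || in < 0 || read(in, buf, st.st_size) != st.st_size) {
		perror("read image");
		return 1;
	}

	/* One write() of the whole image, as microcode_write() expects. */
	dev = open("/dev/cpu/microcode", O_WRONLY);
	if (dev < 0 || write(dev, buf, st.st_size) != st.st_size) {
		perror("/dev/cpu/microcode");
		return 1;
	}
	return 0;
}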
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
+--- a/arch/i386/kernel/mpparse.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/mpparse.c	2007-03-14 10:55:14.000000000 +0100
+@@ -106,6 +106,7 @@
+ 
+ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
+ {
++#ifndef CONFIG_XEN
+  	int ver, apicid;
+ 	physid_mask_t phys_cpu;
+  	
+@@ -196,8 +197,9 @@
+ 	}
+ 
+ 	cpu_set(num_processors, cpu_possible_map);
++#endif /* CONFIG_XEN */
+ 	num_processors++;
+-
++#ifndef CONFIG_XEN
+ 	/*
+ 	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
+ 	 * but we need to work other dependencies like SMP_SUSPEND etc
+@@ -218,6 +220,7 @@
+ 		}
+ 	}
+ 	bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
++#endif /* CONFIG_XEN */
+ }
+ 
+ static void __init MP_bus_info (struct mpc_config_bus *m)
+@@ -684,7 +687,11 @@
+ 		 * Read the physical hardware table.  Anything here will
+ 		 * override the defaults.
+ 		 */
++#ifdef CONFIG_XEN
++		if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++#else
+ 		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
++#endif
+ 			smp_found_config = 0;
+ 			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
+ 			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
+@@ -719,7 +726,11 @@
+ 
+ static int __init smp_scan_config (unsigned long base, unsigned long length)
+ {
++#ifdef CONFIG_XEN
++	unsigned long *bp = isa_bus_to_virt(base);
++#else
+ 	unsigned long *bp = phys_to_virt(base);
++#endif
+ 	struct intel_mp_floating *mpf;
+ 
+ 	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
+@@ -735,6 +746,7 @@
+ 				|| (mpf->mpf_specification == 4)) ) {
+ 
+ 			smp_found_config = 1;
++#ifndef CONFIG_XEN
+ 			printk(KERN_INFO "found SMP MP-table at %08lx\n",
+ 						virt_to_phys(mpf));
+ 			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
+@@ -754,6 +766,10 @@
+ 					size = end - mpf->mpf_physptr;
+ 				reserve_bootmem(mpf->mpf_physptr, size);
+ 			}
++#else
++			printk(KERN_INFO "found SMP MP-table at %08lx\n",
++				((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base);
++#endif
+ 
+ 			mpf_found = mpf;
+ 			return 1;
+@@ -766,7 +782,9 @@
+ 
+ void __init find_smp_config (void)
+ {
++#ifndef CONFIG_XEN
+ 	unsigned int address;
++#endif
+ 
+ 	/*
+ 	 * FIXME: Linux assumes you have 640K of base ram..
+@@ -797,9 +815,11 @@
+ 	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
+ 	 */
+ 
++#ifndef CONFIG_XEN
+ 	address = get_bios_ebda();
+ 	if (address)
+ 		smp_scan_config(address, 0x400);
++#endif
+ }
+ 
+ int es7000_plat;
+@@ -812,6 +832,7 @@
+ 
+ void __init mp_register_lapic_address(u64 address)
+ {
++#ifndef CONFIG_XEN
+ 	mp_lapic_addr = (unsigned long) address;
+ 
+ 	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
+@@ -820,6 +841,7 @@
+ 		boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+ 
+ 	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
++#endif
+ }
+ 
+ void __cpuinit mp_register_lapic (u8 id, u8 enabled)
+@@ -836,6 +858,7 @@
+ 	if (id == boot_cpu_physical_apicid)
+ 		boot_cpu = 1;
+ 
++#ifndef CONFIG_XEN
+ 	processor.mpc_type = MP_PROCESSOR;
+ 	processor.mpc_apicid = id;
+ 	processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
+@@ -846,6 +869,7 @@
+ 	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
+ 	processor.mpc_reserved[0] = 0;
+ 	processor.mpc_reserved[1] = 0;
++#endif
+ 
+ 	MP_processor_info(&processor);
+ }
+@@ -900,7 +924,9 @@
+ 	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
+ 	mp_ioapics[idx].mpc_apicaddr = address;
+ 
++#ifndef CONFIG_XEN
+ 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++#endif
+ 	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ 		&& !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
+ 		tmpid = io_apic_get_unique_id(idx, id);
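[Editor's note: smp_scan_config(), switched to isa_bus_to_virt() above since
dom0's pseudo-physical addresses are not machine addresses, hunts for the
16-byte MP floating pointer structure on 16-byte boundaries: the "_MP_"
signature, a plausible length and spec revision, and a byte checksum of zero.
A simplified scanner over an in-memory buffer, with structure offsets taken
from the MP 1.4 spec:]

#include <stdio.h>
#include <string.h>

/* The whole 16-byte structure must sum to 0 modulo 256. */
static int mpf_checksum_ok(const unsigned char *p, int len)
{
	int sum = 0;

	while (len--)
		sum += *p++;
	return (sum & 0xff) == 0;
}

/* Look for "_MP_" on 16-byte boundaries, as smp_scan_config() does. */
static long scan_for_mpf(const unsigned char *base, long length)
{
	long off;

	for (off = 0; off + 16 <= length; off += 16)
		if (!memcmp(base + off, "_MP_", 4) &&
		    mpf_checksum_ok(base + off, 16))
			return off;
	return -1;
}

int main(void)
{
	unsigned char area[64] = { 0 };
	int i, sum = 0;

	/* Forge one valid entry at offset 16 (offsets per the MP 1.4 spec). */
	memcpy(area + 16, "_MP_", 4);
	area[16 + 8] = 1;	/* mpf_length, in 16-byte units */
	area[16 + 9] = 4;	/* mpf_specification: revision 1.4 */
	for (i = 0; i < 16; i++)
		sum += area[16 + i];
	area[16 + 10] = (unsigned char)(256 - (sum & 0xff));	/* mpf_checksum */

	printf("found at offset %ld\n", scan_for_mpf(area, sizeof(area)));
	return 0;
}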
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/pci-dma-xen.c b/arch/i386/kernel/pci-dma-xen.c
+--- a/arch/i386/kernel/pci-dma-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/pci-dma-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,374 @@
 +/*
-+ * Find the highest page frame number we have available
++ * Dynamic DMA mapping support.
++ *
++ * On i386 there is no hardware dynamic DMA address translation,
++ * so consistent alloc/free are merely page allocation/freeing.
++ * The rest of the dynamic DMA mapping interface is implemented
++ * in asm/pci.h.
 + */
-+void __init find_max_pfn(void)
-+{
-+	int i;
 +
-+	max_pfn = 0;
-+	if (efi_enabled) {
-+		efi_memmap_walk(efi_find_max_pfn, &max_pfn);
-+		efi_memmap_walk(efi_memory_present_wrapper, NULL);
-+		return;
-+	}
++#include <linux/types.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/pci.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <asm/io.h>
++#include <xen/balloon.h>
++#include <asm/tlbflush.h>
++#include <asm-i386/mach-xen/asm/swiotlb.h>
++#include <asm/bug.h>
 +
-+	for (i = 0; i < e820.nr_map; i++) {
-+		unsigned long start, end;
-+		/* RAM? */
-+		if (e820.map[i].type != E820_RAM)
-+			continue;
-+		start = PFN_UP(e820.map[i].addr);
-+		end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-+		if (start >= end)
-+			continue;
-+		if (end > max_pfn)
-+			max_pfn = end;
-+		memory_present(0, start, end);
-+	}
-+}
++#ifdef __x86_64__
++int iommu_merge __read_mostly = 0;
++EXPORT_SYMBOL(iommu_merge);
 +
-+/*
-+ * Determine low and high memory ranges:
-+ */
-+unsigned long __init find_max_low_pfn(void)
-+{
-+	unsigned long max_low_pfn;
++dma_addr_t bad_dma_address __read_mostly;
++EXPORT_SYMBOL(bad_dma_address);
 +
-+	max_low_pfn = max_pfn;
-+	if (max_low_pfn > MAXMEM_PFN) {
-+		if (highmem_pages == -1)
-+			highmem_pages = max_pfn - MAXMEM_PFN;
-+		if (highmem_pages + MAXMEM_PFN < max_pfn)
-+			max_pfn = MAXMEM_PFN + highmem_pages;
-+		if (highmem_pages + MAXMEM_PFN > max_pfn) {
-+			printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
-+			highmem_pages = 0;
-+		}
-+		max_low_pfn = MAXMEM_PFN;
-+#ifndef CONFIG_HIGHMEM
-+		/* Maximum memory usable is what is directly addressable */
-+		printk(KERN_WARNING "Warning only %ldMB will be used.\n",
-+					MAXMEM>>20);
-+		if (max_pfn > MAX_NONPAE_PFN)
-+			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
-+		else
-+			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
-+		max_pfn = MAXMEM_PFN;
-+#else /* !CONFIG_HIGHMEM */
-+#ifndef CONFIG_X86_PAE
-+		if (max_pfn > MAX_NONPAE_PFN) {
-+			max_pfn = MAX_NONPAE_PFN;
-+			printk(KERN_WARNING "Warning only 4GB will be used.\n");
-+			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
-+		}
-+#endif /* !CONFIG_X86_PAE */
-+#endif /* !CONFIG_HIGHMEM */
-+	} else {
-+		if (highmem_pages == -1)
-+			highmem_pages = 0;
-+#ifdef CONFIG_HIGHMEM
-+		if (highmem_pages >= max_pfn) {
-+			printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
-+			highmem_pages = 0;
-+		}
-+		if (highmem_pages) {
-+			if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
-+				printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
-+				highmem_pages = 0;
-+			}
-+			max_low_pfn -= highmem_pages;
-+		}
++/* This tells the BIO block layer to assume merging. Default to off
++   because we cannot guarantee merging later. */
++int iommu_bio_merge __read_mostly = 0;
++EXPORT_SYMBOL(iommu_bio_merge);
++
++int iommu_sac_force __read_mostly = 0;
++EXPORT_SYMBOL(iommu_sac_force);
++
++int no_iommu __read_mostly;
++#ifdef CONFIG_IOMMU_DEBUG
++int panic_on_overflow __read_mostly = 1;
++int force_iommu __read_mostly = 1;
 +#else
-+		if (highmem_pages)
-+			printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
++int panic_on_overflow __read_mostly = 0;
++int force_iommu __read_mostly = 0;
 +#endif
-+	}
-+	return max_low_pfn;
-+}
 +
-+/*
-+ * Free all available memory for boot time allocation.  Used
-+ * as a callback function by efi_memory_walk()
-+ */
++/* Set this to 1 if there is a HW IOMMU in the system */
++int iommu_detected __read_mostly = 0;
 +
-+static int __init
-+free_available_memory(unsigned long start, unsigned long end, void *arg)
++void __init pci_iommu_alloc(void)
 +{
-+	/* check max_low_pfn */
-+	if (start >= (max_low_pfn << PAGE_SHIFT))
-+		return 0;
-+	if (end >= (max_low_pfn << PAGE_SHIFT))
-+		end = max_low_pfn << PAGE_SHIFT;
-+	if (start < end)
-+		free_bootmem(start, end - start);
++	/*
++	 * The order of these functions is important for
++	 * fall-back/fail-over reasons
++	 */
++#ifdef CONFIG_IOMMU
++	iommu_hole_init();
++#endif
 +
-+	return 0;
-+}
-+/*
-+ * Register fully available low RAM pages with the bootmem allocator.
-+ */
-+static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
-+{
-+	int i;
++#ifdef CONFIG_CALGARY_IOMMU
++#include <asm/calgary.h>
++	/* shut up compiler */
++	use_calgary = use_calgary;
++	detect_calgary();
++#endif
 +
-+	if (efi_enabled) {
-+		efi_memmap_walk(free_available_memory, NULL);
-+		return;
-+	}
-+	for (i = 0; i < e820.nr_map; i++) {
-+		unsigned long curr_pfn, last_pfn, size;
-+		/*
-+		 * Reserve usable low memory
-+		 */
-+		if (e820.map[i].type != E820_RAM)
-+			continue;
-+		/*
-+		 * We are rounding up the start address of usable memory:
-+		 */
-+		curr_pfn = PFN_UP(e820.map[i].addr);
-+		if (curr_pfn >= max_low_pfn)
-+			continue;
-+		/*
-+		 * ... and at the end of the usable range downwards:
-+		 */
-+		last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++#ifdef CONFIG_SWIOTLB
++	pci_swiotlb_init();
++#endif
++}
 +
-+#ifdef CONFIG_XEN
-+		/*
-+                 * Truncate to the number of actual pages currently
-+                 * present.
-+                 */
-+		if (last_pfn > xen_start_info->nr_pages)
-+			last_pfn = xen_start_info->nr_pages;
 +#endif
 +
-+		if (last_pfn > max_low_pfn)
-+			last_pfn = max_low_pfn;
++struct dma_coherent_mem {
++	void		*virt_base;
++	u32		device_base;
++	int		size;
++	int		flags;
++	unsigned long	*bitmap;
++};
++
++#define IOMMU_BUG_ON(test)				\
++do {							\
++	if (unlikely(test)) {				\
++		printk(KERN_ALERT "Fatal DMA error! "	\
++		       "Please use 'swiotlb=force'\n");	\
++		BUG();					\
++	}						\
++} while (0)
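
[Usage sketch, not part of the patch; "mydev_map" and its arguments are hypothetical. Within this file's context, with no hardware IOMMU and swiotlb disabled, a mapping is just an address conversion, so IOMMU_BUG_ON() is the only thing standing between an unreachable bus address and silent corruption — hence the hard stop and the pointer to the swiotlb=force bounce-buffer fallback.]

static dma_addr_t mydev_map(struct device *dev, void *buf)
{
	/* No translation hardware: bus address == pseudo-physical address. */
	dma_addr_t bus = virt_to_bus(buf);

	/* If the device cannot reach this address, stop hard and advise
	 * booting with swiotlb=force so mappings get bounced instead. */
	IOMMU_BUG_ON(address_needs_mapping(dev, bus));
	return bus;
}
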
 +
-+		/*
-+		 * .. finally, did all the rounding and playing
-+		 * around just make the area go away?
-+		 */
-+		if (last_pfn <= curr_pfn)
-+			continue;
++int
++dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++	   enum dma_data_direction direction)
++{
++	int i, rc;
++
++	BUG_ON(!valid_dma_direction(direction));
++	WARN_ON(nents == 0 || sg[0].length == 0);
 +
-+		size = last_pfn - curr_pfn;
-+		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
++	if (swiotlb) {
++		rc = swiotlb_map_sg(hwdev, sg, nents, direction);
++	} else {
++		for (i = 0; i < nents; i++) {
++			sg[i].dma_address =
++				page_to_bus(sg[i].page) + sg[i].offset;
++			sg[i].dma_length  = sg[i].length;
++			BUG_ON(!sg[i].page);
++			IOMMU_BUG_ON(address_needs_mapping(
++				hwdev, sg[i].dma_address));
++		}
++		rc = nents;
 +	}
++
++	flush_write_buffers();
++	return rc;
 +}
++EXPORT_SYMBOL(dma_map_sg);
 +
-+#ifndef CONFIG_XEN
-+/*
-+ * workaround for Dell systems that neglect to reserve EBDA
-+ */
-+static void __init reserve_ebda_region(void)
++void
++dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++	     enum dma_data_direction direction)
 +{
-+	unsigned int addr;
-+	addr = get_bios_ebda();
-+	if (addr)
-+		reserve_bootmem(addr, PAGE_SIZE);	
++	BUG_ON(!valid_dma_direction(direction));
++	if (swiotlb)
++		swiotlb_unmap_sg(hwdev, sg, nents, direction);
 +}
-+#endif
++EXPORT_SYMBOL(dma_unmap_sg);
 +
-+#ifndef CONFIG_NEED_MULTIPLE_NODES
-+void __init setup_bootmem_allocator(void);
-+static unsigned long __init setup_memory(void)
++/*
++ * XXX This file is also used by xenLinux/ia64.
++ * "defined(__i386__) || defined(__x86_64__)" means "!defined(__ia64__)".
++ * This #if workaround should be removed once this file is merged back into
++ * i386's pci-dma or is moved to drivers/xen/core.
++ */
++#if defined(__i386__) || defined(__x86_64__)
++dma_addr_t
++dma_map_page(struct device *dev, struct page *page, unsigned long offset,
++	     size_t size, enum dma_data_direction direction)
 +{
-+	/*
-+	 * partially used pages are not usable - thus
-+	 * we are rounding upwards:
-+	 */
-+ 	min_low_pfn = PFN_UP(__pa(xen_start_info->pt_base)) +
-+		xen_start_info->nr_pt_frames;
-+
-+	find_max_pfn();
-+
-+	max_low_pfn = find_max_low_pfn();
++	dma_addr_t dma_addr;
 +
-+#ifdef CONFIG_HIGHMEM
-+	highstart_pfn = highend_pfn = max_pfn;
-+	if (max_pfn > max_low_pfn) {
-+		highstart_pfn = max_low_pfn;
-+	}
-+	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
-+		pages_to_mb(highend_pfn - highstart_pfn));
-+#endif
-+	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
-+			pages_to_mb(max_low_pfn));
++	BUG_ON(!valid_dma_direction(direction));
 +
-+	setup_bootmem_allocator();
++	if (swiotlb) {
++		dma_addr = swiotlb_map_page(
++			dev, page, offset, size, direction);
++	} else {
++		dma_addr = page_to_bus(page) + offset;
++		IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
++	}
 +
-+	return max_low_pfn;
++	return dma_addr;
 +}
++EXPORT_SYMBOL(dma_map_page);
 +
-+void __init zone_sizes_init(void)
++void
++dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
++	       enum dma_data_direction direction)
 +{
-+	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
-+	unsigned int max_dma, low;
-+
-+	/*
-+	 * XEN: Our notion of "DMA memory" is fake when running over Xen.
-+	 * We simply put all RAM in the DMA zone so that those drivers which
-+	 * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
-+	 * Those drivers that *do* require lowmem are screwed anyway when
-+	 * running over Xen!
-+	 */
-+	max_dma = max_low_pfn;
-+	low = max_low_pfn;
-+
-+	if (low < max_dma)
-+		zones_size[ZONE_DMA] = low;
-+	else {
-+		zones_size[ZONE_DMA] = max_dma;
-+		zones_size[ZONE_NORMAL] = low - max_dma;
-+#ifdef CONFIG_HIGHMEM
-+		zones_size[ZONE_HIGHMEM] = highend_pfn - low;
-+#endif
-+	}
-+	free_area_init(zones_size);
++	BUG_ON(!valid_dma_direction(direction));
++	if (swiotlb)
++		swiotlb_unmap_page(dev, dma_address, size, direction);
 +}
-+#else
-+extern unsigned long __init setup_memory(void);
-+extern void zone_sizes_init(void);
-+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
++EXPORT_SYMBOL(dma_unmap_page);
++#endif /* defined(__i386__) || defined(__x86_64__) */
 +
-+void __init setup_bootmem_allocator(void)
++int
++dma_mapping_error(dma_addr_t dma_addr)
 +{
-+	unsigned long bootmap_size;
-+	/*
-+	 * Initialize the boot-time allocator (with low memory only):
-+	 */
-+	bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
-+
-+	register_bootmem_low_pages(max_low_pfn);
++	if (swiotlb)
++		return swiotlb_dma_mapping_error(dma_addr);
++	return 0;
++}
++EXPORT_SYMBOL(dma_mapping_error);
 +
++int
++dma_supported(struct device *dev, u64 mask)
++{
++	if (swiotlb)
++		return swiotlb_dma_supported(dev, mask);
 +	/*
-+	 * Reserve the bootmem bitmap itself as well. We do this in two
-+	 * steps (first step was init_bootmem()) because this catches
-+	 * the (very unlikely) case of us accidentally initializing the
-+	 * bootmem allocator with an invalid RAM area.
++	 * By default we'll BUG when an infeasible DMA is requested, and
++	 * request swiotlb=force (see IOMMU_BUG_ON).
 +	 */
-+	reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
-+			 bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
++	return 1;
++}
++EXPORT_SYMBOL(dma_supported);
 +
-+#ifndef CONFIG_XEN
-+	/*
-+	 * reserve physical page 0 - it's a special BIOS page on many boxes,
-+	 * enabling clean reboots, SMP operation, laptop functions.
-+	 */
-+	reserve_bootmem(0, PAGE_SIZE);
++void *dma_alloc_coherent(struct device *dev, size_t size,
++			   dma_addr_t *dma_handle, gfp_t gfp)
++{
++	void *ret;
++	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++	unsigned int order = get_order(size);
++	unsigned long vstart;
++	/* ignore region specifiers */
++	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 +
-+	/* reserve EBDA region, it's a 4K region */
-+	reserve_ebda_region();
++	if (mem) {
++		int page = bitmap_find_free_region(mem->bitmap, mem->size,
++						     order);
++		if (page >= 0) {
++			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
++			ret = mem->virt_base + (page << PAGE_SHIFT);
++			memset(ret, 0, size);
++			return ret;
++		}
++		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
++			return NULL;
++	}
 +
-+    /* could be an AMD 768MPX chipset. Reserve a page  before VGA to prevent
-+       PCI prefetch into it (errata #56). Usually the page is reserved anyways,
-+       unless you have no PS/2 mouse plugged in. */
-+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-+	    boot_cpu_data.x86 == 6)
-+	     reserve_bootmem(0xa0000 - 4096, 4096);
++	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
++		gfp |= GFP_DMA;
 +
-+#ifdef CONFIG_SMP
-+	/*
-+	 * But first pinch a few for the stack/trampoline stuff
-+	 * FIXME: Don't need the extra page at 4K, but need to fix
-+	 * trampoline before removing it. (see the GDT stuff)
-+	 */
-+	reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
-+#endif
-+#ifdef CONFIG_ACPI_SLEEP
-+	/*
-+	 * Reserve low memory region for sleep support.
-+	 */
-+	acpi_reserve_bootmem();
-+#endif
-+#endif /* !CONFIG_XEN */
++	vstart = __get_free_pages(gfp, order);
++	ret = (void *)vstart;
 +
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	if (xen_start_info->mod_start) {
-+		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
-+			/*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
-+			initrd_start = INITRD_START + PAGE_OFFSET;
-+			initrd_end = initrd_start+INITRD_SIZE;
-+			initrd_below_start_ok = 1;
-+		}
-+		else {
-+			printk(KERN_ERR "initrd extends beyond end of memory "
-+			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+			    INITRD_START + INITRD_SIZE,
-+			    max_low_pfn << PAGE_SHIFT);
-+			initrd_start = 0;
++	if (ret != NULL) {
++		/* NB. Hardcode 31 address bits for now: aacraid limitation. */
++		if (xen_create_contiguous_region(vstart, order, 31) != 0) {
++			free_pages(vstart, order);
++			return NULL;
 +		}
++		memset(ret, 0, size);
++		*dma_handle = virt_to_bus(ret);
 +	}
-+#endif
-+#ifdef CONFIG_KEXEC
-+	if (crashk_res.start != crashk_res.end)
-+		reserve_bootmem(crashk_res.start,
-+			crashk_res.end - crashk_res.start + 1);
-+#endif
-+
-+	if (!xen_feature(XENFEAT_auto_translated_physmap))
-+		phys_to_machine_mapping =
-+			(unsigned long *)xen_start_info->mfn_list;
++	return ret;
 +}
++EXPORT_SYMBOL(dma_alloc_coherent);
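
[Usage sketch, not part of the patch; the "mydev" names and ring size are hypothetical. The point of the Xen twist above is that dma_alloc_coherent() hands back memory which xen_create_contiguous_region() has made machine-contiguous, so the returned handle can be programmed straight into the device.]

#include <linux/pci.h>
#include <linux/dma-mapping.h>

struct mydev_ring {
	void *vaddr;		/* CPU (pseudo-physical) view */
	dma_addr_t bus;		/* machine address for the device */
};

static int mydev_alloc_ring(struct pci_dev *pdev, struct mydev_ring *ring)
{
	ring->vaddr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					 &ring->bus, GFP_KERNEL);
	if (!ring->vaddr)
		return -ENOMEM;
	/* ... teardown: dma_free_coherent(&pdev->dev, PAGE_SIZE,
	 *                                 ring->vaddr, ring->bus); */
	return 0;
}
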
 +
-+/*
-+ * The node 0 pgdat is initialized before all of these because
-+ * it's needed for bootmem.  node>0 pgdats have their virtual
-+ * space allocated before the pagetables are in place to access
-+ * them, so they can't be cleared then.
-+ *
-+ * This should all compile down to nothing when NUMA is off.
-+ */
-+void __init remapped_pgdat_init(void)
++void dma_free_coherent(struct device *dev, size_t size,
++			 void *vaddr, dma_addr_t dma_handle)
 +{
-+	int nid;
++	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++	int order = get_order(size);
++	
++	if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
++		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
 +
-+	for_each_online_node(nid) {
-+		if (nid != 0)
-+			memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
++		bitmap_release_region(mem->bitmap, page, order);
++	} else {
++		xen_destroy_contiguous_region((unsigned long)vaddr, order);
++		free_pages((unsigned long)vaddr, order);
 +	}
 +}
++EXPORT_SYMBOL(dma_free_coherent);
 +
-+/*
-+ * Request address space for all standard RAM and ROM resources
-+ * and also for regions reported as reserved by the e820.
-+ */
-+static void __init
-+legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
++#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++				dma_addr_t device_addr, size_t size, int flags)
 +{
-+	int i;
-+	struct e820entry *map = e820.map;
-+	int nr_map = e820.nr_map;
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+	struct xen_memory_map memmap;
-+
-+	map = machine_e820.map;
-+	memmap.nr_entries = E820MAX;
-+
-+	set_xen_guest_handle(memmap.buffer, map);
++	void __iomem *mem_base = NULL;
++	int pages = size >> PAGE_SHIFT;
++	int bitmap_size = (pages + 31)/32;
 +
-+	if(HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
-+		BUG();
-+	machine_e820.nr_map = memmap.nr_entries;
-+	nr_map = memmap.nr_entries;
-+	e820_setup_gap(map, memmap.nr_entries);
-+#endif
++	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
++		goto out;
++	if (!size)
++		goto out;
++	if (dev->dma_mem)
++		goto out;
 +
-+	probe_roms();
++	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
 +
-+	for (i = 0; i < nr_map; i++) {
-+		struct resource *res;
-+		if (map[i].addr + map[i].size > 0x100000000ULL)
-+			continue;
-+		res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
-+		switch (map[i].type) {
-+		case E820_RAM:	res->name = "System RAM"; break;
-+		case E820_ACPI:	res->name = "ACPI Tables"; break;
-+		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
-+		default:	res->name = "reserved";
-+		}
-+		res->start = map[i].addr;
-+		res->end = res->start + map[i].size - 1;
-+		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+		if (request_resource(&iomem_resource, res)) {
-+			kfree(res);
-+			continue;
-+		}
-+		if (map[i].type == E820_RAM) {
-+			/*
-+			 *  We don't know which RAM region contains kernel data,
-+			 *  so we try it repeatedly and let the resource manager
-+			 *  test it.
-+			 */
-+#ifndef CONFIG_XEN
-+			request_resource(res, code_resource);
-+			request_resource(res, data_resource);
-+#endif
-+#ifdef CONFIG_KEXEC
-+			request_resource(res, &crashk_res);
-+#endif
-+		}
-+	}
-+}
++	mem_base = ioremap(bus_addr, size);
++	if (!mem_base)
++		goto out;
 +
-+/*
-+ * Request address space for all standard resources
-+ *
-+ * This is called just before pcibios_init(), which is also a
-+ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
-+ */
-+static int __init request_standard_resources(void)
-+{
-+	int i;
++	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
++	if (!dev->dma_mem)
++		goto out;
++	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
++	if (!dev->dma_mem->bitmap)
++		goto free1_out;
 +
-+	/* Nothing to do if not running in dom0. */
-+	if (!is_initial_xendomain())
-+		return 0;
++	dev->dma_mem->virt_base = mem_base;
++	dev->dma_mem->device_base = device_addr;
++	dev->dma_mem->size = pages;
++	dev->dma_mem->flags = flags;
 +
-+	printk("Setting up standard PCI resources\n");
-+	if (efi_enabled)
-+		efi_initialize_iomem_resources(&code_resource, &data_resource);
-+	else
-+		legacy_init_iomem_resources(&code_resource, &data_resource);
++	if (flags & DMA_MEMORY_MAP)
++		return DMA_MEMORY_MAP;
 +
-+	/* EFI systems may still have VGA */
-+	request_resource(&iomem_resource, &video_ram_resource);
++	return DMA_MEMORY_IO;
 +
-+	/* request I/O space for devices used on all i[345]86 PCs */
-+	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
-+		request_resource(&ioport_resource, &standard_io_resources[i]);
++ free1_out:
++	kfree(dev->dma_mem);
++	dev->dma_mem = NULL;
++ out:
++	if (mem_base)
++		iounmap(mem_base);
 +	return 0;
 +}
++EXPORT_SYMBOL(dma_declare_coherent_memory);
 +
-+subsys_initcall(request_standard_resources);
++void dma_release_declared_memory(struct device *dev)
++{
++	struct dma_coherent_mem *mem = dev->dma_mem;
++	
++	if(!mem)
++		return;
++	dev->dma_mem = NULL;
++	iounmap(mem->virt_base);
++	kfree(mem->bitmap);
++	kfree(mem);
++}
++EXPORT_SYMBOL(dma_release_declared_memory);
 +
-+/*
-+ * Locate an unused range of the physical address space below 4G which
-+ * can be used for PCI mappings.
-+ */
-+static void __init
-+e820_setup_gap(struct e820entry *e820, int nr_map)
++void *dma_mark_declared_memory_occupied(struct device *dev,
++					dma_addr_t device_addr, size_t size)
 +{
-+	unsigned long gapstart, gapsize, round;
-+	unsigned long long last;
-+	int i;
++	struct dma_coherent_mem *mem = dev->dma_mem;
++	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	int pos, err;
 +
-+	/*
-+	 * Search for the biggest gap in the low 32 bits of the e820
-+	 * memory space.
-+	 */
-+	last = 0x100000000ull;
-+	gapstart = 0x10000000;
-+	gapsize = 0x400000;
-+	i = nr_map;
-+	while (--i >= 0) {
-+		unsigned long long start = e820[i].addr;
-+		unsigned long long end = start + e820[i].size;
++	if (!mem)
++		return ERR_PTR(-EINVAL);
 +
-+		/*
-+		 * Since "last" is at most 4GB, we know we'll
-+		 * fit in 32 bits if this condition is true
-+		 */
-+		if (last > end) {
-+			unsigned long gap = last - end;
++	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
++	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
++	if (err != 0)
++		return ERR_PTR(err);
++	return mem->virt_base + (pos << PAGE_SHIFT);
++}
++EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
++#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
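
[Usage sketch for the declare/occupy pair above, not part of the patch; the device, window address, and size are hypothetical. A driver with device-local memory publishes its bus window once, after which coherent allocations for that device are carved out of the window's bitmap rather than out of system RAM.]

static int mydev_setup_local_ram(struct device *dev)
{
	/* Hypothetical 64KB window at bus address 0xd0000000, which the
	 * device itself addresses starting at 0. */
	if (!dma_declare_coherent_memory(dev, 0xd0000000, 0, 0x10000,
					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
		return -ENXIO;

	/* dma_alloc_coherent(dev, ...) now allocates from this window;
	 * dma_release_declared_memory(dev) undoes the whole thing. */
	return 0;
}
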
 +
-+			if (gap > gapsize) {
-+				gapsize = gap;
-+				gapstart = end;
-+			}
-+		}
-+		if (start < last)
-+			last = start;
-+	}
++dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++	       enum dma_data_direction direction)
++{
++	dma_addr_t dma;
 +
-+	/*
-+	 * See how much we want to round up: start off with
-+	 * rounding to the next 1MB area.
-+	 */
-+	round = 0x100000;
-+	while ((gapsize >> 4) > round)
-+		round += round;
-+	/* Fun with two's complement */
-+	pci_mem_start = (gapstart + round) & -round;
++	BUG_ON(!valid_dma_direction(direction));
++	WARN_ON(size == 0);
 +
-+	printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
-+		pci_mem_start, gapstart, gapsize);
++	if (swiotlb) {
++		dma = swiotlb_map_single(dev, ptr, size, direction);
++	} else {
++		dma = virt_to_bus(ptr);
++		IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
++		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
++	}
++
++	flush_write_buffers();
++	return dma;
 +}
++EXPORT_SYMBOL(dma_map_single);
 +
-+static void __init register_memory(void)
++void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++		 enum dma_data_direction direction)
 +{
-+#ifndef CONFIG_XEN
-+	e820_setup_gap(e820.map, e820.nr_map);
-+#endif
++	BUG_ON(!valid_dma_direction(direction));
++	if (swiotlb)
++		swiotlb_unmap_single(dev, dma_addr, size, direction);
 +}
++EXPORT_SYMBOL(dma_unmap_single);
 +
-+#ifdef CONFIG_MCA
-+static void set_mca_bus(int x)
++void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
++			enum dma_data_direction direction)
 +{
-+	MCA_bus = x;
++	if (swiotlb)
++		swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
 +}
-+#else
-+static void set_mca_bus(int x) { }
-+#endif
++EXPORT_SYMBOL(dma_sync_single_for_cpu);
 +
++void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
++                           enum dma_data_direction direction)
++{
++	if (swiotlb)
++		swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_device);
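
[To close out the file, a streaming-DMA usage sketch, not part of the patch; "mydev_rx" and its buffer are hypothetical. It shows why the sync hooks above matter: when swiotlb is active the mapping may be a bounce buffer, and dma_sync_single_for_cpu() is what brings the device's data back before the CPU looks at it.]

static void mydev_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(bus))
		return;

	/* ... point the device at "bus" and wait for the transfer ... */

	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
	/* ... the CPU may now read buf ... */
	dma_unmap_single(dev, bus, len, DMA_FROM_DEVICE);
}
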
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/process-xen.c b/arch/i386/kernel/process-xen.c
+--- a/arch/i386/kernel/process-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/process-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,851 @@
 +/*
-+ * Determine if we were loaded by an EFI loader.  If so, then we have also been
-+ * passed the efi memmap, systab, etc., so we should use these data structures
-+ * for initialization.  Note, the efi init code path is determined by the
-+ * global efi_enabled. This allows the same kernel image to be used on existing
-+ * systems (with a traditional BIOS) as well as on EFI systems.
++ *  linux/arch/i386/kernel/process.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  Pentium III FXSR, SSE support
++ *	Gareth Hughes <gareth at valinux.com>, May 2000
 + */
-+void __init setup_arch(char **cmdline_p)
-+{
-+	int i, j, k, fpp;
-+	struct physdev_set_iopl set_iopl;
-+	unsigned long max_low_pfn;
-+
-+	/* Force a quick death if the kernel panics (not domain 0). */
-+	extern int panic_timeout;
-+	if (!panic_timeout && !is_initial_xendomain())
-+		panic_timeout = 1;
 +
-+	/* Register a call for panic conditions. */
-+	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++/*
++ * This file handles the architecture-dependent parts of process handling..
++ */
 +
-+	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
-+	HYPERVISOR_vm_assist(VMASST_CMD_enable,
-+			     VMASST_TYPE_writable_pagetables);
++#include <stdarg.h>
 +
-+	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
-+	pre_setup_arch_hook();
-+	early_cpu_init();
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/user.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/utsname.h>
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include <linux/init.h>
++#include <linux/mc146818rtc.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/random.h>
++#include <linux/personality.h>
 +
-+	/*
-+	 * FIXME: This isn't an official loader_type right
-+	 * now but does currently work with elilo.
-+	 * If we were configured as an EFI kernel, check to make
-+	 * sure that we were loaded correctly from elilo and that
-+	 * the system table is valid.  If not, then initialize normally.
-+	 */
-+#ifdef CONFIG_EFI
-+	if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
-+		efi_enabled = 1;
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/ldt.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/desc.h>
++#include <asm/vm86.h>
++#ifdef CONFIG_MATH_EMULATION
++#include <asm/math_emu.h>
 +#endif
 +
-+	/* This must be initialized to UNNAMED_MAJOR for ipconfig to work
-+	   properly.  Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
-+	*/
-+	ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
-+ 	drive_info = DRIVE_INFO;
-+ 	screen_info = SCREEN_INFO;
-+	edid_info = EDID_INFO;
-+	apm_info.bios = APM_BIOS_INFO;
-+	ist_info = IST_INFO;
-+	saved_videomode = VIDEO_MODE;
-+	if( SYS_DESC_TABLE.length != 0 ) {
-+		set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
-+		machine_id = SYS_DESC_TABLE.table[0];
-+		machine_submodel_id = SYS_DESC_TABLE.table[1];
-+		BIOS_revision = SYS_DESC_TABLE.table[2];
-+	}
-+	bootloader_type = LOADER_TYPE;
-+
-+	if (is_initial_xendomain()) {
-+		/* This is drawn from a dump from vgacon:startup in
-+		 * standard Linux. */
-+		screen_info.orig_video_mode = 3; 
-+		screen_info.orig_video_isVGA = 1;
-+		screen_info.orig_video_lines = 25;
-+		screen_info.orig_video_cols = 80;
-+		screen_info.orig_video_ega_bx = 3;
-+		screen_info.orig_video_points = 16;
-+		screen_info.orig_y = screen_info.orig_video_lines - 1;
-+		if (xen_start_info->console.dom0.info_size >=
-+		    sizeof(struct dom0_vga_console_info)) {
-+			const struct dom0_vga_console_info *info =
-+				(struct dom0_vga_console_info *)(
-+					(char *)xen_start_info +
-+					xen_start_info->console.dom0.info_off);
-+			dom0_init_screen_info(info);
-+		}
-+		xen_start_info->console.domU.mfn = 0;
-+		xen_start_info->console.domU.evtchn = 0;
-+	} else
-+		screen_info.orig_video_isVGA = 0;
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <xen/cpu_hotplug.h>
 +
-+#ifdef CONFIG_BLK_DEV_RAM
-+	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-+#endif
++#include <linux/err.h>
 +
-+	setup_xen_features();
++#include <asm/tlbflush.h>
++#include <asm/cpu.h>
++#include <asm/pda.h>
 +
-+	ARCH_SETUP
-+	if (efi_enabled)
-+		efi_init();
-+	else {
-+		printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-+		print_memory_map(machine_specific_memory_setup());
-+	}
++asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 +
-+	copy_edd();
++static int hlt_counter;
 +
-+	if (!MOUNT_ROOT_RDONLY)
-+		root_mountflags &= ~MS_RDONLY;
-+	init_mm.start_code = (unsigned long) _text;
-+	init_mm.end_code = (unsigned long) _etext;
-+	init_mm.end_data = (unsigned long) _edata;
-+	init_mm.brk = (PFN_UP(__pa(xen_start_info->pt_base)) +
-+		       xen_start_info->nr_pt_frames) << PAGE_SHIFT;
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
 +
-+	code_resource.start = virt_to_phys(_text);
-+	code_resource.end = virt_to_phys(_etext)-1;
-+	data_resource.start = virt_to_phys(_etext);
-+	data_resource.end = virt_to_phys(_edata)-1;
++/*
++ * Return saved PC of a blocked thread.
++ */
++unsigned long thread_saved_pc(struct task_struct *tsk)
++{
++	return ((unsigned long *)tsk->thread.esp)[3];
++}
 +
-+	parse_cmdline_early(cmdline_p);
++/*
++ * Powermanagement idle function, if any..
++ */
++void (*pm_idle)(void);
++EXPORT_SYMBOL(pm_idle);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 +
-+#ifdef CONFIG_EARLY_PRINTK
-+	{
-+		char *s = strstr(*cmdline_p, "earlyprintk=");
-+		if (s) {
-+			setup_early_printk(strchr(s, '=') + 1);
-+			printk("early console enabled\n");
-+		}
-+	}
-+#endif
++void disable_hlt(void)
++{
++	hlt_counter++;
++}
 +
-+	max_low_pfn = setup_memory();
++EXPORT_SYMBOL(disable_hlt);
 +
-+	/*
-+	 * NOTE: before this point _nobody_ is allowed to allocate
-+	 * any memory using the bootmem allocator.  Although the
-+	 * allocator is now initialised, only the first 8Mb of the kernel
-+	 * virtual address space has been mapped.  All allocations before
-+	 * paging_init() has completed must use the alloc_bootmem_low_pages()
-+	 * variant (which allocates DMA'able memory) and care must be taken
-+	 * not to exceed the 8Mb limit.
-+	 */
++void enable_hlt(void)
++{
++	hlt_counter--;
++}
 +
-+#ifdef CONFIG_SMP
-+	smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
-+#endif
-+	paging_init();
-+	remapped_pgdat_init();
-+	sparse_init();
-+	zone_sizes_init();
++EXPORT_SYMBOL(enable_hlt);
 +
-+#ifdef CONFIG_X86_FIND_SMP_CONFIG
++/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
++void xen_idle(void)
++{
++	current_thread_info()->status &= ~TS_POLLING;
 +	/*
-+	 * Find and reserve possible boot-time SMP configuration:
++	 * TS_POLLING-cleared state must be visible before we
++	 * test NEED_RESCHED:
 +	 */
-+	find_smp_config();
++	smp_mb();
++
++	local_irq_disable();
++	if (!need_resched())
++		safe_halt();	/* enables interrupts racelessly */
++	else
++		local_irq_enable();
++	current_thread_info()->status |= TS_POLLING;
++}
++#ifdef CONFIG_APM_MODULE
++EXPORT_SYMBOL(default_idle);
 +#endif
 +
-+	/* Make sure we have a correctly sized P->M table. */
-+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+		phys_to_machine_mapping = alloc_bootmem_low_pages(
-+		     max_pfn * sizeof(unsigned long));
-+		memset(phys_to_machine_mapping, ~0,
-+		       max_pfn * sizeof(unsigned long));
-+		memcpy(phys_to_machine_mapping,
-+		       (unsigned long *)xen_start_info->mfn_list,
-+		       xen_start_info->nr_pages * sizeof(unsigned long));
-+		free_bootmem(
-+		     __pa(xen_start_info->mfn_list),
-+		     PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
-+				     sizeof(unsigned long))));
++#ifdef CONFIG_HOTPLUG_CPU
++extern cpumask_t cpu_initialized;
++static inline void play_dead(void)
++{
++	idle_task_exit();
++	local_irq_disable();
++	cpu_clear(smp_processor_id(), cpu_initialized);
++	preempt_enable_no_resched();
++	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
++	cpu_bringup();
++}
++#else
++static inline void play_dead(void)
++{
++	BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
 +
-+		/*
-+		 * Initialise the list of the frames that specify the list of
-+		 * frames that make up the p2m table. Used by save/restore
-+		 */
-+		pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
-+		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+		     virt_to_mfn(pfn_to_mfn_frame_list_list);
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (i.e. sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle(void)
++{
++	int cpu = smp_processor_id();
 +
-+		fpp = PAGE_SIZE/sizeof(unsigned long);
-+		for (i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++) {
-+			if ((j % fpp) == 0) {
-+				k++;
-+				BUG_ON(k>=16);
-+				pfn_to_mfn_frame_list[k] =
-+					alloc_bootmem_low_pages(PAGE_SIZE);
-+				pfn_to_mfn_frame_list_list[k] =
-+					virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+				j=0;
-+			}
-+			pfn_to_mfn_frame_list[k][j] =
-+				virt_to_mfn(&phys_to_machine_mapping[i]);
-+		}
-+		HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
-+	}
++	current_thread_info()->status |= TS_POLLING;
 +
-+	/*
-+	 * NOTE: at this point the bootmem allocator is fully available.
-+	 */
++	/* endless idle loop with no priority at all */
++	while (1) {
++		while (!need_resched()) {
 +
-+	if (is_initial_xendomain())
-+		dmi_scan_machine();
++			if (__get_cpu_var(cpu_idle_state))
++				__get_cpu_var(cpu_idle_state) = 0;
 +
-+#ifdef CONFIG_X86_GENERICARCH
-+	generic_apic_probe(*cmdline_p);
-+#endif	
-+	if (efi_enabled)
-+		efi_map_memmap();
++			rmb();
 +
-+	set_iopl.iopl = 1;
-+	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++			if (cpu_is_offline(cpu))
++				play_dead();
 +
-+#ifdef CONFIG_ACPI
-+	if (!is_initial_xendomain()) {
-+		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
-+		acpi_disabled = 1;
-+		acpi_ht = 0;
++			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
++			xen_idle();
++		}
++		preempt_enable_no_resched();
++		schedule();
++		preempt_disable();
 +	}
++}
 +
-+	/*
-+	 * Parse the ACPI tables for possible boot-time SMP configuration.
-+	 */
-+	acpi_boot_table_init();
-+#endif
++void cpu_idle_wait(void)
++{
++	unsigned int cpu, this_cpu = get_cpu();
++	cpumask_t map, tmp = current->cpus_allowed;
 +
-+#ifdef CONFIG_X86_IO_APIC
-+	check_acpi_pci();	/* Checks more than just ACPI actually */
-+#endif
++	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++	put_cpu();
 +
-+#ifdef CONFIG_ACPI
-+	acpi_boot_init();
++	cpus_clear(map);
++	for_each_online_cpu(cpu) {
++		per_cpu(cpu_idle_state, cpu) = 1;
++		cpu_set(cpu, map);
++	}
 +
-+#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
-+	if (def_to_bigsmp)
-+		printk(KERN_WARNING "More than 8 CPUs detected and "
-+			"CONFIG_X86_PC cannot handle it.\nUse "
-+			"CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
-+#endif
-+#endif
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	if (smp_found_config)
-+		get_smp_config();
-+#endif
-+#if defined(CONFIG_XEN) && defined(CONFIG_SMP)
-+	prefill_possible_map();
-+#endif
++	__get_cpu_var(cpu_idle_state) = 0;
 +
-+	register_memory();
++	wmb();
++	do {
++		ssleep(1);
++		for_each_online_cpu(cpu) {
++			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
++				cpu_clear(cpu, map);
++		}
++		cpus_and(map, map, cpu_online_map);
++	} while (!cpus_empty(map));
 +
-+	if (is_initial_xendomain()) {
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+		if (!efi_enabled ||
-+		    (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
-+			conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+		conswitchp = &dummy_con;
-+#endif
-+#endif
-+	} else {
-+		extern int console_use_vt;
-+		console_use_vt = 0;
-+	}
-+#ifdef CONFIG_X86_TSC
-+	tsc_init();
-+#endif
++	set_cpus_allowed(current, tmp);
 +}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
 +
-+static int
-+xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
-+{
-+	HYPERVISOR_shutdown(SHUTDOWN_crash);
-+	/* we're never actually going to get here... */
-+	return NOTIFY_DONE;
-+}
++/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
++/* Always use xen_idle() instead. */
++void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) {}
 +
-+static __init int add_pcspkr(void)
++void __devinit select_idle_routine(const struct cpuinfo_x86 *c) {}
++
++void show_regs(struct pt_regs * regs)
 +{
-+	struct platform_device *pd;
-+	int ret;
++	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
 +
-+	pd = platform_device_alloc("pcspkr", -1);
-+	if (!pd)
-+		return -ENOMEM;
++	printk("\n");
++	printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
++	printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
++	print_symbol("EIP is at %s\n", regs->eip);
 +
-+	ret = platform_device_add(pd);
-+	if (ret)
-+		platform_device_put(pd);
++	if (user_mode_vm(regs))
++		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
++	printk(" EFLAGS: %08lx    %s  (%s %.*s)\n",
++	       regs->eflags, print_tainted(), init_utsname()->release,
++	       (int)strcspn(init_utsname()->version, " "),
++	       init_utsname()->version);
++	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
++		regs->eax,regs->ebx,regs->ecx,regs->edx);
++	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
++		regs->esi, regs->edi, regs->ebp);
++	printk(" DS: %04x ES: %04x GS: %04x\n",
++	       0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xgs);
 +
-+	return ret;
++	cr0 = read_cr0();
++	cr2 = read_cr2();
++	cr3 = read_cr3();
++	cr4 = read_cr4_safe();
++	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
++	show_trace(NULL, regs, &regs->esp);
 +}
-+device_initcall(add_pcspkr);
 +
 +/*
-+ * Local Variables:
-+ * mode:c
-+ * c-file-style:"k&r"
-+ * c-basic-offset:8
-+ * End:
++ * This gets run with %ebx containing the
++ * function to call, and %edx containing
++ * the "args".
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/smp-xen.c linux-2.6.18-xen/arch/i386/kernel/smp-xen.c
---- linux-2.6.18.3/arch/i386/kernel/smp-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/smp-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,624 @@
++extern void kernel_thread_helper(void);
++
 +/*
-+ *	Intel SMP support routines.
-+ *
-+ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
-+ *	(c) 1998-99, 2000 Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	This code is released under the GNU General Public License version 2 or
-+ *	later.
++ * Create a kernel thread
 + */
++int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++{
++	struct pt_regs regs;
 +
-+#include <linux/init.h>
-+
-+#include <linux/mm.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/smp_lock.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/cache.h>
-+#include <linux/interrupt.h>
-+#include <linux/cpu.h>
-+#include <linux/module.h>
++	memset(&regs, 0, sizeof(regs));
 +
-+#include <asm/mtrr.h>
-+#include <asm/tlbflush.h>
-+#if 0
-+#include <mach_apic.h>
-+#endif
-+#include <xen/evtchn.h>
++	regs.ebx = (unsigned long) fn;
++	regs.edx = (unsigned long) arg;
 +
-+/*
-+ *	Some notes on x86 processor bugs affecting SMP operation:
-+ *
-+ *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
-+ *	The Linux implications for SMP are handled as follows:
-+ *
-+ *	Pentium III / [Xeon]
-+ *		None of the E1AP-E3AP errata are visible to the user.
-+ *
-+ *	E1AP.	see PII A1AP
-+ *	E2AP.	see PII A2AP
-+ *	E3AP.	see PII A3AP
-+ *
-+ *	Pentium II / [Xeon]
-+ *		None of the A1AP-A3AP errata are visible to the user.
-+ *
-+ *	A1AP.	see PPro 1AP
-+ *	A2AP.	see PPro 2AP
-+ *	A3AP.	see PPro 7AP
-+ *
-+ *	Pentium Pro
-+ *		None of 1AP-9AP errata are visible to the normal user,
-+ *	except occasional delivery of 'spurious interrupt' as trap #15.
-+ *	This is very rare and a non-problem.
-+ *
-+ *	1AP.	Linux maps APIC as non-cacheable
-+ *	2AP.	worked around in hardware
-+ *	3AP.	fixed in C0 and above steppings microcode update.
-+ *		Linux does not use excessive STARTUP_IPIs.
-+ *	4AP.	worked around in hardware
-+ *	5AP.	symmetric IO mode (normal Linux operation) not affected.
-+ *		'noapic' mode has vector 0xf filled out properly.
-+ *	6AP.	'noapic' mode might be affected - fixed in later steppings
-+ *	7AP.	We do not assume writes to the LVT deasserting IRQs
-+ *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
-+ *	9AP.	We do not use mixed mode
-+ *
-+ *	Pentium
-+ *		There is a marginal case where REP MOVS on 100MHz SMP
-+ *	machines with B stepping processors can fail. XXX should provide
-+ *	an L1cache=Writethrough or L1cache=off option.
-+ *
-+ *		B stepping CPUs may hang. There are hardware workarounds
-+ *	for this. We warn about it in case your board doesn't have the
-+ *	workarounds. Basically that's so I can tell anyone with a B stepping
-+ *	CPU and SMP problems "tough".
-+ *
-+ *	Specific items [From Pentium Processor Specification Update]
-+ *
-+ *	1AP.	Linux doesn't use remote read
-+ *	2AP.	Linux doesn't trust APIC errors
-+ *	3AP.	We work around this
-+ *	4AP.	Linux never generated 3 interrupts of the same priority
-+ *		to cause a lost local interrupt.
-+ *	5AP.	Remote read is never used
-+ *	6AP.	not affected - worked around in hardware
-+ *	7AP.	not affected - worked around in hardware
-+ *	8AP.	worked around in hardware - we get explicit CS errors if not
-+ *	9AP.	only 'noapic' mode affected. Might generate spurious
-+ *		interrupts, we log only the first one and count the
-+ *		rest silently.
-+ *	10AP.	not affected - worked around in hardware
-+ *	11AP.	Linux reads the APIC between writes to avoid this, as per
-+ *		the documentation. Make sure you preserve this as it affects
-+ *		the C stepping chips too.
-+ *	12AP.	not affected - worked around in hardware
-+ *	13AP.	not affected - worked around in hardware
-+ *	14AP.	we always deassert INIT during bootup
-+ *	15AP.	not affected - worked around in hardware
-+ *	16AP.	not affected - worked around in hardware
-+ *	17AP.	not affected - worked around in hardware
-+ *	18AP.	not affected - worked around in hardware
-+ *	19AP.	not affected - worked around in BIOS
-+ *
-+ *	If this sounds worrying believe me these bugs are either ___RARE___,
-+ *	or are signal timing bugs worked around in hardware and there's
-+ *	about nothing of note with C stepping upwards.
-+ */
++	regs.xds = __USER_DS;
++	regs.xes = __USER_DS;
++	regs.xgs = __KERNEL_PDA;
++	regs.orig_eax = -1;
++	regs.eip = (unsigned long) kernel_thread_helper;
++	regs.xcs = __KERNEL_CS | get_kernel_rpl();
++	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
 +
-+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
++	/* Ok, create the new process.. */
++	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++}
++EXPORT_SYMBOL(kernel_thread);
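
[Usage sketch for kernel_thread() as shipped here, not part of the patch; the daemon below is hypothetical. 2.6-era callers pass a C function plus clone flags, and the new thread detaches itself with daemonize() before doing any work.]

static int mydevd(void *unused)
{
	daemonize("mydevd");	/* detach: reparent to init, free user state */
	allow_signal(SIGKILL);	/* let this demo thread be stopped */

	while (!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);	/* ... service the device once a second ... */
	}
	return 0;
}

static void mydev_start_daemon(void)
{
	kernel_thread(mydevd, NULL, CLONE_FS | CLONE_FILES);
}
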
 +
 +/*
-+ * the following functions deal with sending IPIs between CPUs.
-+ *
-+ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
++ * Free current thread data structures etc..
 + */
-+
-+static inline int __prepare_ICR (unsigned int shortcut, int vector)
++void exit_thread(void)
 +{
-+	unsigned int icr = shortcut | APIC_DEST_LOGICAL;
++	/* The process may have allocated an io port bitmap... nuke it. */
++	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
++		struct task_struct *tsk = current;
++		struct thread_struct *t = &tsk->thread;
 +
-+	switch (vector) {
-+	default:
-+		icr |= APIC_DM_FIXED | vector;
-+		break;
-+	case NMI_VECTOR:
-+		icr |= APIC_DM_NMI;
-+		break;
++		struct physdev_set_iobitmap set_iobitmap = { 0 };
++		HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
++		kfree(t->io_bitmap_ptr);
++		t->io_bitmap_ptr = NULL;
++		clear_thread_flag(TIF_IO_BITMAP);
 +	}
-+	return icr;
 +}
 +
-+static inline int __prepare_ICR2 (unsigned int mask)
++void flush_thread(void)
 +{
-+	return SET_APIC_DEST_FIELD(mask);
++	struct task_struct *tsk = current;
++
++	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
++	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
++	clear_tsk_thread_flag(tsk, TIF_DEBUG);
++	/*
++	 * Forget coprocessor state..
++	 */
++	clear_fpu(tsk);
++	clear_used_math();
 +}
 +
-+DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++void release_thread(struct task_struct *dead_task)
++{
++	BUG_ON(dead_task->mm);
++	release_vm86_irqs(dead_task);
++}
 +
-+static inline void __send_IPI_one(unsigned int cpu, int vector)
++/*
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
++ */
++void prepare_to_copy(struct task_struct *tsk)
 +{
-+	int irq = per_cpu(ipi_to_irq, cpu)[vector];
-+	BUG_ON(irq < 0);
-+	notify_remote_via_irq(irq);
++	unlazy_fpu(tsk);
 +}
 +
-+void __send_IPI_shortcut(unsigned int shortcut, int vector)
++int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
++	unsigned long unused,
++	struct task_struct * p, struct pt_regs * regs)
 +{
-+	int cpu;
++	struct pt_regs * childregs;
++	struct task_struct *tsk;
++	int err;
 +
-+	switch (shortcut) {
-+	case APIC_DEST_SELF:
-+		__send_IPI_one(smp_processor_id(), vector);
-+		break;
-+	case APIC_DEST_ALLBUT:
-+		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+			if (cpu == smp_processor_id())
-+				continue;
-+			if (cpu_isset(cpu, cpu_online_map)) {
-+				__send_IPI_one(cpu, vector);
-+			}
++	childregs = task_pt_regs(p);
++	*childregs = *regs;
++	childregs->eax = 0;
++	childregs->esp = esp;
++
++	p->thread.esp = (unsigned long) childregs;
++	p->thread.esp0 = (unsigned long) (childregs+1);
++
++	p->thread.eip = (unsigned long) ret_from_fork;
++
++	savesegment(fs,p->thread.fs);
++
++	tsk = current;
++	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
++		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
++						IO_BITMAP_BYTES, GFP_KERNEL);
++		if (!p->thread.io_bitmap_ptr) {
++			p->thread.io_bitmap_max = 0;
++			return -ENOMEM;
 +		}
-+		break;
-+	default:
-+		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
-+		       vector);
-+		break;
++		set_tsk_thread_flag(p, TIF_IO_BITMAP);
++	}
++
++	/*
++	 * Set a new TLS for the child thread?
++	 */
++	if (clone_flags & CLONE_SETTLS) {
++		struct desc_struct *desc;
++		struct user_desc info;
++		int idx;
++
++		err = -EFAULT;
++		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
++			goto out;
++		err = -EINVAL;
++		if (LDT_empty(&info))
++			goto out;
++
++		idx = info.entry_number;
++		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++			goto out;
++
++		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++		desc->a = LDT_entry_a(&info);
++		desc->b = LDT_entry_b(&info);
 +	}
-+}
 +
-+void fastcall send_IPI_self(int vector)
-+{
-+	__send_IPI_shortcut(APIC_DEST_SELF, vector);
++	p->thread.iopl = current->thread.iopl;
++
++	err = 0;
++ out:
++	if (err && p->thread.io_bitmap_ptr) {
++		kfree(p->thread.io_bitmap_ptr);
++		p->thread.io_bitmap_max = 0;
++	}
++	return err;
 +}
 +
 +/*
-+ * This is only used on smaller machines.
++ * fill in the user structure for a core dump..
 + */
-+void send_IPI_mask_bitmask(cpumask_t mask, int vector)
++void dump_thread(struct pt_regs * regs, struct user * dump)
 +{
-+	unsigned long flags;
-+	unsigned int cpu;
++	int i;
 +
-+	local_irq_save(flags);
-+	WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);
++/* changed the size calculations - should hopefully work better. lbt */
++	dump->magic = CMAGIC;
++	dump->start_code = 0;
++	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
++	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
++	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
++	dump->u_dsize -= dump->u_tsize;
++	dump->u_ssize = 0;
++	for (i = 0; i < 8; i++)
++		dump->u_debugreg[i] = current->thread.debugreg[i];  
 +
-+	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+		if (cpu_isset(cpu, mask)) {
-+			__send_IPI_one(cpu, vector);
-+		}
-+	}
++	if (dump->start_stack < TASK_SIZE)
++		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
 +
-+	local_irq_restore(flags);
++	dump->regs.ebx = regs->ebx;
++	dump->regs.ecx = regs->ecx;
++	dump->regs.edx = regs->edx;
++	dump->regs.esi = regs->esi;
++	dump->regs.edi = regs->edi;
++	dump->regs.ebp = regs->ebp;
++	dump->regs.eax = regs->eax;
++	dump->regs.ds = regs->xds;
++	dump->regs.es = regs->xes;
++	savesegment(fs,dump->regs.fs);
++	dump->regs.gs = regs->xgs;
++	dump->regs.orig_eax = regs->orig_eax;
++	dump->regs.eip = regs->eip;
++	dump->regs.cs = regs->xcs;
++	dump->regs.eflags = regs->eflags;
++	dump->regs.esp = regs->esp;
++	dump->regs.ss = regs->xss;
++
++	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
 +}
++EXPORT_SYMBOL(dump_thread);
 +
-+void send_IPI_mask_sequence(cpumask_t mask, int vector)
++/* 
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 +{
++	struct pt_regs ptregs = *task_pt_regs(tsk);
++	ptregs.xcs &= 0xffff;
++	ptregs.xds &= 0xffff;
++	ptregs.xes &= 0xffff;
++	ptregs.xss &= 0xffff;
 +
-+	send_IPI_mask_bitmask(mask, vector);
++	elf_core_copy_regs(regs, &ptregs);
++
++	return 1;
 +}
 +
-+#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
++static noinline void __switch_to_xtra(struct task_struct *next_p)
++{
++	struct thread_struct *next;
 +
-+#if 0 /* XEN */
-+/*
-+ *	Smarter SMP flushing macros. 
-+ *		c/o Linus Torvalds.
-+ *
-+ *	These mean you can really definitely utterly forget about
-+ *	writing to user space from interrupts. (It's not allowed anyway.)
-+ *
-+ *	Optimizations Manfred Spraul <manfred at colorfullife.com>
-+ */
++	next = &next_p->thread;
 +
-+static cpumask_t flush_cpumask;
-+static struct mm_struct * flush_mm;
-+static unsigned long flush_va;
-+static DEFINE_SPINLOCK(tlbstate_lock);
-+#define FLUSH_ALL	0xffffffff
++	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
++		set_debugreg(next->debugreg[0], 0);
++		set_debugreg(next->debugreg[1], 1);
++		set_debugreg(next->debugreg[2], 2);
++		set_debugreg(next->debugreg[3], 3);
++		/* no 4 and 5 */
++		set_debugreg(next->debugreg[6], 6);
++		set_debugreg(next->debugreg[7], 7);
++	}
++#ifndef CONFIG_XEN
++	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
++		/*
++		 * Disable the bitmap via an invalid offset. We still cache
++		 * the previous bitmap owner and the IO bitmap contents:
++		 */
++		tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
++		return;
++	}
++
++	if (likely(next == tss->io_bitmap_owner)) {
++		/*
++		 * Previous owner of the bitmap (hence the bitmap content)
++		 * matches the next task, we don't have to do anything but
++		 * set a valid offset in the TSS:
++		 */
++		tss->io_bitmap_base = IO_BITMAP_OFFSET;
++		return;
++	}
++	/*
++	 * Lazy TSS's I/O bitmap copy. We set an invalid offset here
++	 * and we let the task get a GPF in case an I/O instruction
++	 * is performed.  The handler of the GPF will verify that the
++	 * faulting task has a valid I/O bitmap and, if true, does the
++	 * real copy and restarts the instruction.  This will save us
++	 * redundant copies when the currently switched task does not
++	 * perform any I/O during its timeslice.
++	 */
++	tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
++#endif /* !CONFIG_XEN */
++}
 +
 +/*
-+ * We cannot call mmdrop() because we are in interrupt context, 
-+ * instead update mm->cpu_vm_mask.
-+ *
-+ * We need to reload %cr3 since the page tables may be going
-+ * away from under us..
++ * This function decides whether the context switch from prev to next
++ * needs to tweak the TSC disable bit in cr4.
 + */
-+static inline void leave_mm (unsigned long cpu)
++static inline void disable_tsc(struct task_struct *prev_p,
++			       struct task_struct *next_p)
 +{
-+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-+		BUG();
-+	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
-+	load_cr3(swapper_pg_dir);
++	struct thread_info *prev, *next;
++
++	/*
++	 * gcc should eliminate the ->thread_info dereference if
++	 * has_secure_computing returns 0 at compile time (SECCOMP=n).
++	 */
++	prev = task_thread_info(prev_p);
++	next = task_thread_info(next_p);
++
++	if (has_secure_computing(prev) || has_secure_computing(next)) {
++		/* slow path here */
++		if (has_secure_computing(prev) &&
++		    !has_secure_computing(next)) {
++			write_cr4(read_cr4() & ~X86_CR4_TSD);
++		} else if (!has_secure_computing(prev) &&
++			   has_secure_computing(next))
++			write_cr4(read_cr4() | X86_CR4_TSD);
++	}
 +}
 +
 +/*
++ *	switch_to(x,y) should switch tasks from x to y.
 + *
-+ * The flush IPI assumes that a thread switch happens in this order:
-+ * [cpu0: the cpu that switches]
-+ * 1) switch_mm() either 1a) or 1b)
-+ * 1a) thread switch to a different mm
-+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
-+ * 	Stop ipi delivery for the old mm. This is not synchronized with
-+ * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
-+ * 	for the wrong mm, and in the worst case we perform a superfluous
-+ * 	tlb flush.
-+ * 1a2) set cpu_tlbstate to TLBSTATE_OK
-+ * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
-+ *	was in lazy tlb mode.
-+ * 1a3) update cpu_tlbstate[].active_mm
-+ * 	Now cpu0 accepts tlb flushes for the new mm.
-+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
-+ * 	Now the other cpus will send tlb flush ipis.
-+ * 1a4) change cr3.
-+ * 1b) thread switch without mm change
-+ *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
-+ *	flush ipis.
-+ * 1b1) set cpu_tlbstate to TLBSTATE_OK
-+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
-+ * 	Atomically set the bit [other cpus will start sending flush ipis],
-+ * 	and test the bit.
-+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
-+ * 2) switch %%esp, ie current
++ * We fsave/fwait so that an exception goes off at the right time
++ * (as a call from the fsave or fwait in effect) rather than to
++ * the wrong process. Lazy FP saving no longer makes any sense
++ * with modern CPUs, and this simplifies a lot of things (SMP
++ * and UP become the same).
 + *
-+ * The interrupt must handle 2 special cases:
-+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
-+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
-+ *   runs in kernel space, the cpu could load tlb entries for user space
-+ *   pages.
++ * NOTE! We used to use the x86 hardware context switching. The
++ * reason for not using it any more becomes apparent when you
++ * try to recover gracefully from saved state that is no longer
++ * valid (stale segment register values in particular). With the
++ * hardware task-switch, there is no way to fix up bad state in
++ * a reasonable manner.
 + *
-+ * The good news is that cpu_tlbstate is local to each cpu, no
-+ * write/read ordering problems.
-+ */
-+
-+/*
-+ * TLB flush IPI:
++ * The fact that Intel documents the hardware task-switching to
++ * be slow is a fairly red herring - this code is not noticeably
++ * faster. However, there _is_ some room for improvement here,
++ * so the performance issues may eventually be a valid point.
++ * More important, however, is the fact that this allows us much
++ * more flexibility.
 + *
-+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
-+ * 2) Leave the mm if we are in the lazy tlb mode.
++ * The return value (in %eax) will be the "prev" task after
++ * the task-switch, and shows up in ret_from_fork in entry.S,
++ * for example.
 + */
-+
-+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
-+				     struct pt_regs *regs)
++struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 +{
-+	unsigned long cpu;
++	struct thread_struct *prev = &prev_p->thread,
++				 *next = &next_p->thread;
++	int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++	struct tss_struct *tss = &per_cpu(init_tss, cpu);
++#endif
++	struct physdev_set_iobitmap iobmp_op;
++	multicall_entry_t _mcl[8], *mcl = _mcl;
 +
-+	cpu = get_cpu();
++	/* XEN NOTE: FS/GS saved in switch_mm(), not here. */
 +
-+	if (!cpu_isset(cpu, flush_cpumask))
-+		goto out;
-+		/* 
-+		 * This was a BUG() but until someone can quote me the
-+		 * line from the intel manual that guarantees an IPI to
-+		 * multiple CPUs is retried _only_ on the erroring CPUs
-+		 * it's staying as a return
-+		 *
-+		 * BUG();
-+		 */
-+		 
-+	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
-+		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-+			if (flush_va == FLUSH_ALL)
-+				local_flush_tlb();
-+			else
-+				__flush_tlb_one(flush_va);
-+		} else
-+			leave_mm(cpu);
++	/*
++	 * This is basically '__unlazy_fpu', except that we queue a
++	 * multicall to indicate FPU task switch, rather than
++	 * synchronously trapping to Xen.
++	 */
++	if (prev_p->thread_info->status & TS_USEDFPU) {
++		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++		mcl->op      = __HYPERVISOR_fpu_taskswitch;
++		mcl->args[0] = 1;
++		mcl++;
 +	}
-+	smp_mb__before_clear_bit();
-+	cpu_clear(cpu, flush_cpumask);
-+	smp_mb__after_clear_bit();
-+out:
-+	put_cpu_no_resched();
-+
-+	return IRQ_HANDLED;
-+}
++#if 0 /* lazy fpu sanity check */
++	else BUG_ON(!(read_cr0() & 8));
++#endif
 +
-+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-+						unsigned long va)
-+{
 +	/*
-+	 * A couple of (to be removed) sanity checks:
-+	 *
-+	 * - current CPU must not be in mask
-+	 * - mask must exist :)
++	 * Reload esp0.
++	 * This is load_esp0(tss, next) with a multicall.
 +	 */
-+	BUG_ON(cpus_empty(cpumask));
-+	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
-+	BUG_ON(!mm);
-+
-+	/* If a CPU which we ran on has gone down, OK. */
-+	cpus_and(cpumask, cpumask, cpu_online_map);
-+	if (cpus_empty(cpumask))
-+		return;
++	mcl->op      = __HYPERVISOR_stack_switch;
++	mcl->args[0] = __KERNEL_DS;
++	mcl->args[1] = next->esp0;
++	mcl++;
 +
 +	/*
-+	 * i'm not happy about this global shared spinlock in the
-+	 * MM hot path, but we'll see how contended it is.
-+	 * Temporarily this turns IRQs off, so that lockups are
-+	 * detected by the NMI watchdog.
++	 * Load the per-thread Thread-Local Storage descriptor.
++	 * This is load_TLS(next, cpu) with multicalls.
 +	 */
-+	spin_lock(&tlbstate_lock);
-+	
-+	flush_mm = mm;
-+	flush_va = va;
-+#if NR_CPUS <= BITS_PER_LONG
-+	atomic_set_mask(cpumask, &flush_cpumask);
-+#else
-+	{
-+		int k;
-+		unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
-+		unsigned long *cpu_mask = (unsigned long *)&cpumask;
-+		for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
-+			atomic_set_mask(cpu_mask[k], &flush_mask[k]);
++#define C(i) do {							\
++	if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||	\
++		     next->tls_array[i].b != prev->tls_array[i].b)) {	\
++		mcl->op = __HYPERVISOR_update_descriptor;		\
++		*(u64 *)&mcl->args[0] =	virt_to_machine(		\
++			&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
++		*(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i];	\
++		mcl++;							\
++	}								\
++} while (0)
++	C(0); C(1); C(2);
++#undef C
++
++	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
++		iobmp_op.bitmap   = (char *)next->io_bitmap_ptr;
++		iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++		mcl->op      = __HYPERVISOR_physdev_op;
++		mcl->args[0] = PHYSDEVOP_set_iobitmap;
++		mcl->args[1] = (unsigned long)&iobmp_op;
++		mcl++;
 +	}
-+#endif
-+	/*
-+	 * We have to send the IPI only to
-+	 * CPUs affected.
-+	 */
-+	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
 +
-+	while (!cpus_empty(flush_cpumask))
-+		/* nothing. lockup detection does not belong here */
-+		mb();
++	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
 +
-+	flush_mm = NULL;
-+	flush_va = 0;
-+	spin_unlock(&tlbstate_lock);
-+}
-+	
-+void flush_tlb_current_task(void)
-+{
-+	struct mm_struct *mm = current->mm;
-+	cpumask_t cpu_mask;
++	/*
++	 * Restore %fs if needed.
++	 *
++	 * Glibc normally makes %fs be zero.
++	 */
++	if (unlikely(next->fs))
++		loadsegment(fs, next->fs);
 +
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
++	write_pda(pcurrent, next_p);
 +
-+	local_flush_tlb();
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+	preempt_enable();
-+}
++	/* we're going to use this soon, after a few expensive things */
++	if (next_p->fpu_counter > 5)
++		prefetch(&next->i387.fxsave);
 +
-+void flush_tlb_mm (struct mm_struct * mm)
-+{
-+	cpumask_t cpu_mask;
++	/*
++	 * Now maybe handle debug registers and/or IO bitmaps
++	 */
++	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
++	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
++		__switch_to_xtra(next_p);
 +
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
++	disable_tsc(prev_p, next_p);
 +
-+	if (current->active_mm == mm) {
-+		if (current->mm)
-+			local_flush_tlb();
-+		else
-+			leave_mm(smp_processor_id());
-+	}
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++	/* If the task has used fpu the last 5 timeslices, just do a full
++	 * restore of the math state immediately to avoid the trap; the
++	 * chances of needing FPU soon are obviously high now
++	 */
++	if (next_p->fpu_counter > 5)
++		math_state_restore();
 +
-+	preempt_enable();
++	return prev_p;
 +}
 +
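
The switch path above never traps to Xen once per privileged update: it
queues the FPU notification, the kernel stack switch and any changed TLS
descriptors as multicall entries, then flushes the batch with a single
hypercall. A minimal sketch of that pattern, using only the public
multicall interface (queue_switch_work and next_esp0 are hypothetical
names, and only two of the entries __switch_to builds are shown):

	static void queue_switch_work(unsigned long next_esp0)
	{
		multicall_entry_t buf[2], *mcl = buf;

		mcl->op      = __HYPERVISOR_stack_switch;	/* replaces load_esp0() */
		mcl->args[0] = __KERNEL_DS;
		mcl->args[1] = next_esp0;
		mcl++;

		mcl->op      = __HYPERVISOR_fpu_taskswitch;	/* replaces a trap to Xen */
		mcl->args[0] = 1;
		mcl++;

		/* one hypercall executes everything queued above, in order */
		(void)HYPERVISOR_multicall(buf, mcl - buf);
	}
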
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++asmlinkage int sys_fork(struct pt_regs regs)
 +{
-+	struct mm_struct *mm = vma->vm_mm;
-+	cpumask_t cpu_mask;
-+
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
-+
-+	if (current->active_mm == mm) {
-+		if(current->mm)
-+			__flush_tlb_one(va);
-+		else
-+		 	leave_mm(smp_processor_id());
-+	}
-+
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, va);
-+
-+	preempt_enable();
++	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
 +}
-+EXPORT_SYMBOL(flush_tlb_page);
 +
-+static void do_flush_tlb_all(void* info)
++asmlinkage int sys_clone(struct pt_regs regs)
 +{
-+	unsigned long cpu = smp_processor_id();
-+
-+	__flush_tlb_all();
-+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
-+		leave_mm(cpu);
-+}
++	unsigned long clone_flags;
++	unsigned long newsp;
++	int __user *parent_tidptr, *child_tidptr;
 +
-+void flush_tlb_all(void)
-+{
-+	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++	clone_flags = regs.ebx;
++	newsp = regs.ecx;
++	parent_tidptr = (int __user *)regs.edx;
++	child_tidptr = (int __user *)regs.edi;
++	if (!newsp)
++		newsp = regs.esp;
++	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
 +}
 +
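
sys_fork() and sys_clone() above rely on the i386 syscall convention:
user arguments arrive in %ebx, %ecx, %edx, %esi and %edi, and the entry
path spills all registers into a struct pt_regs that each handler picks
apart by hand. A sketch of the shape (sys_example and do_example are
hypothetical names):

	asmlinkage int sys_example(struct pt_regs regs)
	{
		unsigned long a1 = regs.ebx;	/* first syscall argument  */
		unsigned long a2 = regs.ecx;	/* second syscall argument */
		unsigned long a3 = regs.edx;	/* third syscall argument  */

		/* passing &regs on lets helpers such as do_fork() reach
		 * the saved user state, e.g. regs.esp above */
		return do_example(a1, a2, a3, &regs);
	}
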
-+#else
-+
-+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
-+				     struct pt_regs *regs)
-+{ return 0; }
-+void flush_tlb_current_task(void)
-+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
-+void flush_tlb_mm(struct mm_struct * mm)
-+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
-+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
-+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
-+EXPORT_SYMBOL(flush_tlb_page);
-+void flush_tlb_all(void)
-+{ xen_tlb_flush_all(); }
-+
-+#endif /* XEN */
-+
 +/*
-+ * this function sends a 'reschedule' IPI to another CPU.
-+ * it goes straight through and wastes no time serializing
-+ * anything. Worst case is that we lose a reschedule ...
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
 + */
-+void smp_send_reschedule(int cpu)
++asmlinkage int sys_vfork(struct pt_regs regs)
 +{
-+	WARN_ON(cpu_is_offline(cpu));
-+	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
 +}
 +
 +/*
-+ * Structure and data for smp_call_function(). This is designed to minimise
-+ * static memory requirements. It also looks cleaner.
++ * sys_execve() executes a new program.
 + */
-+static DEFINE_SPINLOCK(call_lock);
++asmlinkage int sys_execve(struct pt_regs regs)
++{
++	int error;
++	char * filename;
 +
-+struct call_data_struct {
-+	void (*func) (void *info);
-+	void *info;
-+	atomic_t started;
-+	atomic_t finished;
-+	int wait;
-+};
++	filename = getname((char __user *) regs.ebx);
++	error = PTR_ERR(filename);
++	if (IS_ERR(filename))
++		goto out;
++	error = do_execve(filename,
++			(char __user * __user *) regs.ecx,
++			(char __user * __user *) regs.edx,
++			&regs);
++	if (error == 0) {
++		task_lock(current);
++		current->ptrace &= ~PT_DTRACE;
++		task_unlock(current);
++		/* Make sure we don't return using sysenter.. */
++		set_thread_flag(TIF_IRET);
++	}
++	putname(filename);
++out:
++	return error;
++}
 +
-+void lock_ipi_call_lock(void)
++#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
++#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
++
++unsigned long get_wchan(struct task_struct *p)
 +{
-+	spin_lock_irq(&call_lock);
++	unsigned long ebp, esp, eip;
++	unsigned long stack_page;
++	int count = 0;
++	if (!p || p == current || p->state == TASK_RUNNING)
++		return 0;
++	stack_page = (unsigned long)task_stack_page(p);
++	esp = p->thread.esp;
++	if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
++		return 0;
++	/* include/asm-i386/system.h:switch_to() pushes ebp last. */
++	ebp = *(unsigned long *) esp;
++	do {
++		if (ebp < stack_page || ebp > top_ebp+stack_page)
++			return 0;
++		eip = *(unsigned long *) (ebp+4);
++		if (!in_sched_functions(eip))
++			return eip;
++		ebp = *(unsigned long *) ebp;
++	} while (count++ < 16);
++	return 0;
 +}
 +
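
get_wchan() above is a bounded frame-pointer walk: with frame pointers
enabled, each i386 stack frame keeps the caller's saved %ebp at [ebp] and
the return address at [ebp+4], so following the saved-%ebp chain recovers
the sleeping task's call stack. A user-space analogue of the same walk,
assuming GCC with frame pointers (the bounds and iteration-count checks
done above are elided here; the outermost frame's saved %ebp is zero):

	static void walk_own_stack(void)
	{
		unsigned long *fp = __builtin_frame_address(0);

		while (fp) {
			unsigned long ret = fp[1];	/* return address of this frame */
			(void)ret;	/* ... filter or record it, as in_sched_functions() does ... */
			fp = (unsigned long *)fp[0];	/* follow saved %ebp to the caller */
		}
	}
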
-+void unlock_ipi_call_lock(void)
++/*
++ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
++ */
++static int get_free_idx(void)
 +{
-+	spin_unlock_irq(&call_lock);
-+}
++	struct thread_struct *t = &current->thread;
++	int idx;
 +
-+static struct call_data_struct *call_data;
++	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
++		if (desc_empty(t->tls_array + idx))
++			return idx + GDT_ENTRY_TLS_MIN;
++	return -ESRCH;
++}
 +
-+/**
-+ * smp_call_function(): Run a function on all other CPUs.
-+ * @func: The function to run. This must be fast and non-blocking.
-+ * @info: An arbitrary pointer to pass to the function.
-+ * @nonatomic: currently unused.
-+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
-+ *
-+ * Returns 0 on success, else a negative status code. Does not return until
-+ * remote CPUs are nearly ready to execute <<func>>, or have already executed it.
-+ *
-+ * You must not call this function with disabled interrupts or from a
-+ * hardware interrupt handler or from a bottom half handler.
++/*
++ * Set a given TLS descriptor:
 + */
-+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-+			int wait)
++asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
 +{
-+	struct call_data_struct data;
-+	int cpus;
++	struct thread_struct *t = &current->thread;
++	struct user_desc info;
++	struct desc_struct *desc;
++	int cpu, idx;
 +
-+	/* Holding any lock stops cpus from going down. */
-+	spin_lock(&call_lock);
-+	cpus = num_online_cpus() - 1;
-+	if (!cpus) {
-+		spin_unlock(&call_lock);
-+		return 0;
++	if (copy_from_user(&info, u_info, sizeof(info)))
++		return -EFAULT;
++	idx = info.entry_number;
++
++	/*
++	 * index -1 means the kernel should try to find and
++	 * allocate an empty descriptor:
++	 */
++	if (idx == -1) {
++		idx = get_free_idx();
++		if (idx < 0)
++			return idx;
++		if (put_user(idx, &u_info->entry_number))
++			return -EFAULT;
 +	}
 +
-+	/* Can deadlock when called with interrupts disabled */
-+	WARN_ON(irqs_disabled());
++	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++		return -EINVAL;
 +
-+	data.func = func;
-+	data.info = info;
-+	atomic_set(&data.started, 0);
-+	data.wait = wait;
-+	if (wait)
-+		atomic_set(&data.finished, 0);
++	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
 +
-+	call_data = &data;
-+	mb();
-+	
-+	/* Send a message to all other CPUs and wait for them to respond */
-+	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++	/*
++	 * We must not get preempted while modifying the TLS.
++	 */
++	cpu = get_cpu();
 +
-+	/* Wait for response */
-+	while (atomic_read(&data.started) != cpus)
-+		barrier();
++	if (LDT_empty(&info)) {
++		desc->a = 0;
++		desc->b = 0;
++	} else {
++		desc->a = LDT_entry_a(&info);
++		desc->b = LDT_entry_b(&info);
++	}
++	load_TLS(t, cpu);
 +
-+	if (wait)
-+		while (atomic_read(&data.finished) != cpus)
-+			barrier();
-+	spin_unlock(&call_lock);
++	put_cpu();
 +
 +	return 0;
 +}
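
The idx == -1 convention above lets user space ask the kernel to pick a
free GDT slot itself. A minimal user-space sketch of that round trip,
assuming an i386 system with syscall() available (error handling
trimmed):

	#include <asm/ldt.h>		/* struct user_desc */
	#include <sys/syscall.h>
	#include <unistd.h>

	static int alloc_tls_slot(unsigned long base)
	{
		struct user_desc desc = {
			.entry_number   = -1,	/* "kernel, find a free slot" */
			.base_addr      = base,
			.limit          = 0xfffff,
			.seg_32bit      = 1,
			.limit_in_pages = 1,
			.useable        = 1,
		};

		if (syscall(SYS_set_thread_area, &desc) != 0)
			return -1;
		return desc.entry_number;	/* slot picked via get_free_idx() */
	}
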
-+EXPORT_SYMBOL(smp_call_function);
-+
-+static void stop_this_cpu (void * dummy)
-+{
-+	/*
-+	 * Remove this CPU:
-+	 */
-+	cpu_clear(smp_processor_id(), cpu_online_map);
-+	local_irq_disable();
-+#if 0
-+	disable_local_APIC();
-+#endif
-+	if (cpu_data[smp_processor_id()].hlt_works_ok)
-+		for(;;) halt();
-+	for (;;);
-+}
 +
 +/*
-+ * this function calls the 'stop' function on all other CPUs in the system.
++ * Get the current Thread-Local Storage area:
 + */
 +
-+void smp_send_stop(void)
-+{
-+	smp_call_function(stop_this_cpu, NULL, 1, 0);
++#define GET_BASE(desc) ( \
++	(((desc)->a >> 16) & 0x0000ffff) | \
++	(((desc)->b << 16) & 0x00ff0000) | \
++	( (desc)->b        & 0xff000000)   )
 +
-+	local_irq_disable();
-+#if 0
-+	disable_local_APIC();
-+#endif
-+	local_irq_enable();
-+}
++#define GET_LIMIT(desc) ( \
++	((desc)->a & 0x0ffff) | \
++	 ((desc)->b & 0xf0000) )
++	
++#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
++#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
++#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
++#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
++#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
++#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
 +
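
These macros reassemble fields that the GDT scatters across its two
32-bit words: bits 31..16 of 'a' hold base[15:0], while bits 7..0 and
31..24 of 'b' hold base[23:16] and base[31:24]. A worked decode with
made-up descriptor words for a segment whose base is 0x12345678:

	unsigned int a = 0x56780001;	/* hypothetical: base[15:0]=0x5678, limit[15:0]=1 */
	unsigned int b = 0x12000034;	/* hypothetical: base[31:24]=0x12, base[23:16]=0x34 */

	unsigned int base = ((a >> 16) & 0x0000ffff)	/* 0x00005678 */
			  | ((b << 16) & 0x00ff0000)	/* 0x00340000 */
			  | ( b        & 0xff000000);	/* 0x12000000 => 0x12345678 */
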
-+/*
-+ * Reschedule call back. Nothing to do,
-+ * all the work is done automatically when
-+ * we return from the interrupt.
-+ */
-+irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
-+				     struct pt_regs *regs)
++asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
 +{
++	struct user_desc info;
++	struct desc_struct *desc;
++	int idx;
 +
-+	return IRQ_HANDLED;
-+}
++	if (get_user(idx, &u_info->entry_number))
++		return -EFAULT;
++	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++		return -EINVAL;
 +
-+#include <linux/kallsyms.h>
-+irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
-+					struct pt_regs *regs)
-+{
-+	void (*func) (void *info) = call_data->func;
-+	void *info = call_data->info;
-+	int wait = call_data->wait;
++	memset(&info, 0, sizeof(info));
 +
-+	/*
-+	 * Notify initiating CPU that I've grabbed the data and am
-+	 * about to execute the function
-+	 */
-+	mb();
-+	atomic_inc(&call_data->started);
-+	/*
-+	 * At this point the info structure may be out of scope unless wait==1
-+	 */
-+	irq_enter();
-+	(*func)(info);
-+	irq_exit();
++	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
 +
-+	if (wait) {
-+		mb();
-+		atomic_inc(&call_data->finished);
-+	}
++	info.entry_number = idx;
++	info.base_addr = GET_BASE(desc);
++	info.limit = GET_LIMIT(desc);
++	info.seg_32bit = GET_32BIT(desc);
++	info.contents = GET_CONTENTS(desc);
++	info.read_exec_only = !GET_WRITABLE(desc);
++	info.limit_in_pages = GET_LIMIT_PAGES(desc);
++	info.seg_not_present = !GET_PRESENT(desc);
++	info.useable = GET_USEABLE(desc);
 +
-+	return IRQ_HANDLED;
++	if (copy_to_user(u_info, &info, sizeof(info)))
++		return -EFAULT;
++	return 0;
 +}
 +
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/swiotlb.c linux-2.6.18-xen/arch/i386/kernel/swiotlb.c
---- linux-2.6.18.3/arch/i386/kernel/swiotlb.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/swiotlb.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,672 @@
++unsigned long arch_align_stack(unsigned long sp)
++{
++	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
++		sp -= get_random_int() % 8192;
++	return sp & ~0xf;
++}
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/quirks.c b/arch/i386/kernel/quirks.c
+--- a/arch/i386/kernel/quirks.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/quirks.c	2007-03-14 10:55:14.000000000 +0100
+@@ -7,7 +7,7 @@
+ #include <asm/genapic.h>
+ #include <asm/cpu.h>
+ 
+-#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
++#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
+ static void __devinit verify_quirk_intel_irqbalance(struct pci_dev *dev)
+ {
+ 	u8 config, rev;
+@@ -68,11 +68,19 @@
+ 	word = read_pci_config_16(0, 0, 0x40, 0x4c);
+ 
+ 	if (!(word & (1 << 13))) {
++#ifdef CONFIG_XEN
++		dom0_op_t op;
++		printk(KERN_INFO "Disabling irq balancing and affinity\n");
++		op.cmd = DOM0_PLATFORM_QUIRK;
++		op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
++		(void)HYPERVISOR_dom0_op(&op);
++#else
+ 		printk(KERN_INFO "Disabling irq balancing and affinity\n");
+ #ifdef CONFIG_IRQBALANCE
+ 		irqbalance_disable("");
+ #endif
+ 		noirqdebug_setup("");
++#endif /* CONFIG_XEN */
+ #ifdef CONFIG_PROC_FS
+ 		no_irq_affinity = 1;
+ #endif
+@@ -80,7 +88,7 @@
+ 		printk(KERN_INFO "Disabling cpu hotplug control\n");
+ 		enable_cpu_hotplug = 0;
+ #endif
+-#ifdef CONFIG_X86_64
++#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
+ 		/* force the genapic selection to flat mode so that
+ 		 * interrupts can be redirected to more than one CPU.
+ 		 */
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/setup-xen.c b/arch/i386/kernel/setup-xen.c
+--- a/arch/i386/kernel/setup-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/setup-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,835 @@
 +/*
-+ * Dynamic DMA mapping support.
++ *  linux/arch/i386/kernel/setup.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ *
++ *  Memory region support
++ *	David Parsons <orc at pell.chi.il.us>, July-August 1999
++ *
++ *  Added E820 sanitization routine (removes overlapping memory regions);
++ *  Brian Moyle <bmoyle at mvista.com>, February 2001
++ *
++ * Moved CPU detection code to cpu/${cpu}.c
++ *    Patrick Mochel <mochel at osdl.org>, March 2002
++ *
++ *  Provisions for empty E820 memory regions (reported by certain BIOSes).
++ *  Alex Achenbach <xela at slit.de>, December 2002.
 + *
-+ * This implementation is a fallback for platforms that do not support
-+ * I/O TLBs (aka DMA address translation hardware).
-+ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick at intel.com>
-+ * Copyright (C) 2000 Goutham Rao <goutham.rao at intel.com>
-+ * Copyright (C) 2000, 2003 Hewlett-Packard Co
-+ *	David Mosberger-Tang <davidm at hpl.hp.com>
-+ * Copyright (C) 2005 Keir Fraser <keir at xensource.com>
 + */
 +
-+#include <linux/cache.h>
++/*
++ * This file handles the architecture-dependent parts of initialization
++ */
++
++#include <linux/sched.h>
 +#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ctype.h>
-+#include <linux/init.h>
++#include <linux/mmzone.h>
++#include <linux/screen_info.h>
++#include <linux/ioport.h>
++#include <linux/acpi.h>
++#include <linux/apm_bios.h>
++#include <linux/initrd.h>
 +#include <linux/bootmem.h>
++#include <linux/seq_file.h>
++#include <linux/platform_device.h>
++#include <linux/console.h>
++#include <linux/mca.h>
++#include <linux/root_dev.h>
 +#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/efi.h>
++#include <linux/init.h>
++#include <linux/edd.h>
++#include <linux/nodemask.h>
++#include <linux/notifier.h>
++#include <linux/kexec.h>
++#include <linux/crash_dump.h>
++#include <linux/dmi.h>
++#include <linux/pfn.h>
++
++#include <video/edid.h>
++
++#include <asm/apic.h>
++#include <asm/e820.h>
++#include <asm/mpspec.h>
++#include <asm/mmzone.h>
++#include <asm/setup.h>
++#include <asm/arch_hooks.h>
++#include <asm/sections.h>
++#include <asm/io_apic.h>
++#include <asm/ist.h>
 +#include <asm/io.h>
-+#include <asm/pci.h>
-+#include <asm/dma.h>
-+#include <asm/uaccess.h>
++#include <setup_arch.h>
++#include <bios_ebda.h>
++
++#include <asm/hypervisor.h>
++#include <xen/interface/physdev.h>
 +#include <xen/interface/memory.h>
++#include <xen/features.h>
++#include <xen/xencons.h>
 +
-+int swiotlb;
-+EXPORT_SYMBOL(swiotlb);
++static int xen_panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block xen_panic_block = {
++	xen_panic_event, NULL, 0 /* try to go last */
++};
 +
-+#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
 +
-+#define SG_ENT_PHYS_ADDRESS(sg)	(page_to_bus((sg)->page) + (sg)->offset)
++int disable_pse __devinitdata = 0;
 +
 +/*
-+ * Maximum allowable number of contiguous slabs to map,
-+ * must be a power of 2.  What is the appropriate value ?
-+ * The complexity of {map,unmap}_single is linearly dependent on this value.
++ * Machine setup..
 + */
-+#define IO_TLB_SEGSIZE	128
++extern struct resource code_resource;
++extern struct resource data_resource;
 +
-+/*
-+ * log of the size of each IO TLB slab.  The number of slabs is command line
-+ * controllable.
-+ */
-+#define IO_TLB_SHIFT 11
++/* cpu data as detected by the assembly code in head.S */
++struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++/* common cpu data for all cpus */
++struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++EXPORT_SYMBOL(boot_cpu_data);
 +
-+/* Width of DMA addresses in the IO TLB. 31 bits is an aacraid limitation. */
-+#define IO_TLB_DMA_BITS 31
++unsigned long mmu_cr4_features;
 +
-+int swiotlb_force;
-+static char *iotlb_virt_start;
-+static unsigned long iotlb_nslabs;
++/* for MCA, but anyone else can use it if they want */
++unsigned int machine_id;
++#ifdef CONFIG_MCA
++EXPORT_SYMBOL(machine_id);
++#endif
++unsigned int machine_submodel_id;
++unsigned int BIOS_revision;
++unsigned int mca_pentium_flag;
 +
-+/*
-+ * Used to do a quick range check in swiotlb_unmap_single and
-+ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
-+ * API.
-+ */
-+static unsigned long iotlb_pfn_start, iotlb_pfn_end;
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
 +
-+/* Does the given dma address reside within the swiotlb aperture? */
-+static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
-+{
-+	unsigned long pfn = mfn_to_local_pfn(dev_addr >> PAGE_SHIFT);
-+	return (pfn_valid(pfn)
-+		&& (pfn >= iotlb_pfn_start)
-+		&& (pfn < iotlb_pfn_end));
-+}
++/* user-defined highmem size */
++static unsigned int highmem_pages = -1;
 +
 +/*
-+ * When the IOMMU overflows we return a fallback buffer. This sets the size.
++ * Setup options
 + */
-+static unsigned long io_tlb_overflow = 32*1024;
++struct drive_info_struct { char dummy[32]; } drive_info;
++#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
++    defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
++EXPORT_SYMBOL(drive_info);
++#endif
++struct screen_info screen_info;
++EXPORT_SYMBOL(screen_info);
++struct apm_info apm_info;
++EXPORT_SYMBOL(apm_info);
++struct sys_desc_table_struct {
++	unsigned short length;
++	unsigned char table[0];
++};
++struct edid_info edid_info;
++EXPORT_SYMBOL_GPL(edid_info);
++struct ist_info ist_info;
++#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
++	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
++EXPORT_SYMBOL(ist_info);
++#endif
 +
-+void *io_tlb_overflow_buffer;
++extern void early_cpu_init(void);
++extern int root_mountflags;
 +
-+/*
-+ * This is a free list describing the number of free entries available from
-+ * each index
-+ */
-+static unsigned int *io_tlb_list;
-+static unsigned int io_tlb_index;
++unsigned long saved_videomode;
 +
-+/*
-+ * We need to save away the original address corresponding to a mapped entry
-+ * for the sync operations.
-+ */
-+static struct phys_addr {
-+	struct page *page;
-+	unsigned int offset;
-+} *io_tlb_orig_addr;
++#define RAMDISK_IMAGE_START_MASK  	0x07FF
++#define RAMDISK_PROMPT_FLAG		0x8000
++#define RAMDISK_LOAD_FLAG		0x4000	
 +
-+/*
-+ * Protect the above data structures in the map and unmap calls
-+ */
-+static DEFINE_SPINLOCK(io_tlb_lock);
++static char command_line[COMMAND_LINE_SIZE];
 +
-+static int __init
-+setup_io_tlb_npages(char *str)
-+{
-+	/* Unlike ia64, the size is aperture in megabytes, not 'slabs'! */
-+	if (isdigit(*str)) {
-+		iotlb_nslabs = simple_strtoul(str, &str, 0) <<
-+			(20 - IO_TLB_SHIFT);
-+		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
-+		/* Round up to power of two (xen_create_contiguous_region). */
-+		while (iotlb_nslabs & (iotlb_nslabs-1))
-+			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
-+	}
-+	if (*str == ',')
-+		++str;
-+	/*
-+         * NB. 'force' enables the swiotlb, but doesn't force its use for
-+         * every DMA like it does on native Linux. 'off' forcibly disables
-+         * use of the swiotlb.
-+         */
-+	if (!strcmp(str, "force"))
-+		swiotlb_force = 1;
-+	else if (!strcmp(str, "off"))
-+		swiotlb_force = -1;
-+	return 1;
-+}
-+__setup("swiotlb=", setup_io_tlb_npages);
-+/* make io_tlb_overflow tunable too? */
++unsigned char __initdata boot_params[PARAM_SIZE];
 +
 +/*
-+ * Statically reserve bounce buffer space and initialize bounce buffer data
-+ * structures for the software IO TLB used to implement the PCI DMA API.
++ * Point at the empty zero page to start with. We map the real shared_info
++ * page as soon as fixmap is up and running.
 + */
-+void
-+swiotlb_init_with_default_size (size_t default_size)
-+{
-+	unsigned long i, bytes;
-+
-+	if (!iotlb_nslabs) {
-+		iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
-+		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
-+		/* Round up to power of two (xen_create_contiguous_region). */
-+		while (iotlb_nslabs & (iotlb_nslabs-1))
-+			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
-+	}
-+
-+	bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
-+
-+	/*
-+	 * Get IO TLB memory from the low pages
-+	 */
-+	iotlb_virt_start = alloc_bootmem_low_pages(bytes);
-+	if (!iotlb_virt_start)
-+		panic("Cannot allocate SWIOTLB buffer!\n"
-+		      "Use dom0_mem Xen boot parameter to reserve\n"
-+		      "some DMA memory (e.g., dom0_mem=-128M).\n");
-+
-+	for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
-+		int rc = xen_create_contiguous_region(
-+			(unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
-+			get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
-+			IO_TLB_DMA_BITS);
-+		BUG_ON(rc);
-+	}
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
 +
-+	/*
-+	 * Allocate and initialize the free list array.  This array is used
-+	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
-+	 */
-+	io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
-+	for (i = 0; i < iotlb_nslabs; i++)
-+ 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-+	io_tlb_index = 0;
-+	io_tlb_orig_addr = alloc_bootmem(
-+		iotlb_nslabs * sizeof(*io_tlb_orig_addr));
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
++EXPORT_SYMBOL(phys_to_machine_mapping);
 +
-+	/*
-+	 * Get the overflow emergency buffer
-+	 */
-+	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
 +
-+	iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
-+	iotlb_pfn_end   = iotlb_pfn_start + (bytes >> PAGE_SHIFT);
 +
-+	printk(KERN_INFO "Software IO TLB enabled: \n"
-+	       " Aperture:     %lu megabytes\n"
-+	       " Kernel range: 0x%016lx - 0x%016lx\n",
-+	       bytes >> 20,
-+	       (unsigned long)iotlb_virt_start,
-+	       (unsigned long)iotlb_virt_start + bytes);
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ *              from boot_params into a safe place.
++ *
++ */
++static inline void copy_edd(void)
++{
++     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++     edd.edd_info_nr = EDD_NR;
 +}
-+
-+void
-+swiotlb_init(void)
++#else
++static inline void copy_edd(void)
 +{
-+	long ram_end;
-+	size_t defsz = 64 * (1 << 20); /* 64MB default size */
-+
-+	if (swiotlb_force == 1) {
-+		swiotlb = 1;
-+	} else if ((swiotlb_force != -1) &&
-+		   is_running_on_xen() &&
-+		   is_initial_xendomain()) {
-+		/* Domain 0 always has a swiotlb. */
-+		ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
-+		if (ram_end <= 0x7ffff)
-+			defsz = 2 * (1 << 20); /* 2MB on <2GB systems. */
-+		swiotlb = 1;
-+	}
-+
-+	if (swiotlb)
-+		swiotlb_init_with_default_size(defsz);
-+	else
-+		printk(KERN_INFO "Software IO TLB disabled\n");
 +}
++#endif
++
++int __initdata user_defined_memmap = 0;
 +
 +/*
-+ * We use __copy_to_user_inatomic to transfer to the host buffer because the
-+ * buffer may be mapped read-only (e.g, in blkback driver) but lower-level
-+ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
-+ * unnecessary copy from the aperture to the host buffer, and a page fault.
++ * "mem=nopentium" disables the 4MB page tables.
++ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
++ * to <mem>, overriding the bios size.
++ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
++ * <start> to <start>+<mem>, overriding the bios size.
++ *
++ * HPA tells me bootloaders need to parse mem=, so no new
++ * option should be mem=  [also see Documentation/i386/boot.txt]
 + */
-+static void
-+__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
++static int __init parse_mem(char *arg)
 +{
-+	if (PageHighMem(buffer.page)) {
-+		size_t len, bytes;
-+		char *dev, *host, *kmp;
-+		len = size;
-+		while (len != 0) {
-+			if (((bytes = len) + buffer.offset) > PAGE_SIZE)
-+				bytes = PAGE_SIZE - buffer.offset;
-+			kmp  = kmap_atomic(buffer.page, KM_SWIOTLB);
-+			dev  = dma_addr + size - len;
-+			host = kmp + buffer.offset;
-+			if (dir == DMA_FROM_DEVICE) {
-+				if (__copy_to_user_inatomic(host, dev, bytes))
-+					/* inaccessible */;
-+			} else
-+				memcpy(dev, host, bytes);
-+			kunmap_atomic(kmp, KM_SWIOTLB);
-+			len -= bytes;
-+			buffer.page++;
-+			buffer.offset = 0;
-+		}
++	if (!arg)
++		return -EINVAL;
++
++	if (strcmp(arg, "nopentium") == 0) {
++		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++		disable_pse = 1;
 +	} else {
-+		char *host = (char *)phys_to_virt(
-+			page_to_pseudophys(buffer.page)) + buffer.offset;
-+		if (dir == DMA_FROM_DEVICE) {
-+			if (__copy_to_user_inatomic(host, dma_addr, size))
-+				/* inaccessible */;
-+		} else if (dir == DMA_TO_DEVICE)
-+			memcpy(dma_addr, host, size);
++		/* If the user specifies memory size, we
++		 * limit the BIOS-provided memory map to
++		 * that size. exactmap can be used to specify
++		 * the exact map. mem=number can be used to
++		 * trim the existing memory map.
++		 */
++		unsigned long long mem_size;
++ 
++		mem_size = memparse(arg, &arg);
++		limit_regions(mem_size);
++		user_defined_memmap = 1;
 +	}
++	return 0;
++}
++early_param("mem", parse_mem);
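
The same early_param() pattern recurs for the next several options: the
handler runs from parse_early_param(), well before the ordinary
__setup() parsers, which is what lets these options influence the memory
map this early. A minimal sketch ("foo" is a hypothetical option name):

	static unsigned long foo_size;

	static int __init parse_foo(char *arg)
	{
		if (!arg)
			return -EINVAL;
		foo_size = memparse(arg, &arg);	/* accepts K/M/G size suffixes */
		return 0;
	}
	early_param("foo", parse_foo);
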
++
++#ifdef CONFIG_PROC_VMCORE
++/* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel.
++ */
++static int __init parse_elfcorehdr(char *arg)
++{
++	if (!arg)
++		return -EINVAL;
++
++	elfcorehdr_addr = memparse(arg, &arg);
++	return 0;
 +}
++early_param("elfcorehdr", parse_elfcorehdr);
++#endif /* CONFIG_PROC_VMCORE */
 +
 +/*
-+ * Allocates bounce buffer and returns its kernel virtual address.
++ * highmem=size forces highmem to be exactly 'size' bytes.
++ * This works even on boxes that have no highmem otherwise.
++ * This also works to reduce highmem size on bigger boxes.
 + */
-+static void *
-+map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
++static int __init parse_highmem(char *arg)
 +{
-+	unsigned long flags;
-+	char *dma_addr;
-+	unsigned int nslots, stride, index, wrap;
-+	int i;
++	if (!arg)
++		return -EINVAL;
 +
-+	/*
-+	 * For mappings greater than a page, we limit the stride (and
-+	 * hence alignment) to a page size.
-+	 */
-+	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-+	if (size > PAGE_SIZE)
-+		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
-+	else
-+		stride = 1;
++	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
++	return 0;
++}
++early_param("highmem", parse_highmem);
 +
-+	BUG_ON(!nslots);
++/*
++ * vmalloc=size forces the vmalloc area to be exactly 'size'
++ * bytes. This can be used to increase (or decrease) the
++ * vmalloc area - the default is 128m.
++ */
++static int __init parse_vmalloc(char *arg)
++{
++	if (!arg)
++		return -EINVAL;
 +
-+	/*
-+	 * Find suitable number of IO TLB entries size that will fit this
-+	 * request and allocate a buffer from that IO TLB pool.
-+	 */
-+	spin_lock_irqsave(&io_tlb_lock, flags);
-+	{
-+		wrap = index = ALIGN(io_tlb_index, stride);
++	__VMALLOC_RESERVE = memparse(arg, &arg);
++	return 0;
++}
++early_param("vmalloc", parse_vmalloc);
 +
-+		if (index >= iotlb_nslabs)
-+			wrap = index = 0;
++/*
++ * reservetop=size reserves a hole at the top of the kernel address space which
++ * a hypervisor can load into later.  Needed for dynamically loaded hypervisors,
++ * so relocating the fixmap can be done before paging initialization.
++ */
++static int __init parse_reservetop(char *arg)
++{
++	unsigned long address;
 +
-+		do {
-+			/*
-+			 * If we find a slot that indicates we have 'nslots'
-+			 * number of contiguous buffers, we allocate the
-+			 * buffers from that slot and mark the entries as '0'
-+			 * indicating unavailable.
-+			 */
-+			if (io_tlb_list[index] >= nslots) {
-+				int count = 0;
++	if (!arg)
++		return -EINVAL;
 +
-+				for (i = index; i < (int)(index + nslots); i++)
-+					io_tlb_list[i] = 0;
-+				for (i = index - 1;
-+				     (OFFSET(i, IO_TLB_SEGSIZE) !=
-+				      IO_TLB_SEGSIZE -1) && io_tlb_list[i];
-+				     i--)
-+					io_tlb_list[i] = ++count;
-+				dma_addr = iotlb_virt_start +
-+					(index << IO_TLB_SHIFT);
++	address = memparse(arg, &arg);
++	reserve_top_address(address);
++	return 0;
++}
++early_param("reservetop", parse_reservetop);
 +
-+				/*
-+				 * Update the indices to avoid searching in
-+				 * the next round.
-+				 */
-+				io_tlb_index = 
-+					((index + nslots) < iotlb_nslabs
-+					 ? (index + nslots) : 0);
++/*
++ * Determine low and high memory ranges:
++ */
++unsigned long __init find_max_low_pfn(void)
++{
++	unsigned long max_low_pfn;
 +
-+				goto found;
++	max_low_pfn = max_pfn;
++	if (max_low_pfn > MAXMEM_PFN) {
++		if (highmem_pages == -1)
++			highmem_pages = max_pfn - MAXMEM_PFN;
++		if (highmem_pages + MAXMEM_PFN < max_pfn)
++			max_pfn = MAXMEM_PFN + highmem_pages;
++		if (highmem_pages + MAXMEM_PFN > max_pfn) {
++			printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
++			highmem_pages = 0;
++		}
++		max_low_pfn = MAXMEM_PFN;
++#ifndef CONFIG_HIGHMEM
++		/* Maximum memory usable is what is directly addressable */
++		printk(KERN_WARNING "Warning only %ldMB will be used.\n",
++					MAXMEM>>20);
++		if (max_pfn > MAX_NONPAE_PFN)
++			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++		else
++			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
++		max_pfn = MAXMEM_PFN;
++#else /* !CONFIG_HIGHMEM */
++#ifndef CONFIG_X86_PAE
++		if (max_pfn > MAX_NONPAE_PFN) {
++			max_pfn = MAX_NONPAE_PFN;
++			printk(KERN_WARNING "Warning only 4GB will be used.\n");
++			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++		}
++#endif /* !CONFIG_X86_PAE */
++#endif /* !CONFIG_HIGHMEM */
++	} else {
++		if (highmem_pages == -1)
++			highmem_pages = 0;
++#ifdef CONFIG_HIGHMEM
++		if (highmem_pages >= max_pfn) {
++			printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
++			highmem_pages = 0;
++		}
++		if (highmem_pages) {
++			if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
++				printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
++				highmem_pages = 0;
 +			}
-+			index += stride;
-+			if (index >= iotlb_nslabs)
-+				index = 0;
-+		} while (index != wrap);
-+
-+		spin_unlock_irqrestore(&io_tlb_lock, flags);
-+		return NULL;
++			max_low_pfn -= highmem_pages;
++		}
++#else
++		if (highmem_pages)
++			printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
++#endif
 +	}
-+  found:
-+	spin_unlock_irqrestore(&io_tlb_lock, flags);
-+
-+	/*
-+	 * Save away the mapping from the original address to the DMA address.
-+	 * This is needed when we sync the memory.  Then we sync the buffer if
-+	 * needed.
-+	 */
-+	io_tlb_orig_addr[index] = buffer;
-+	if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
-+		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
-+
-+	return dma_addr;
++	return max_low_pfn;
 +}
 +
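
Concretely: on a hypothetical i386 box with 2 GiB of RAM, 4 KiB pages
and the usual 3G/1G split (where MAXMEM comes out near 896 MiB), the
arithmetic above divides memory like this (Xen's hypervisor hole shifts
these numbers somewhat in practice):

	unsigned long max_pfn    = (2048UL << 20) >> 12;	/* 524288 pages total  */
	unsigned long maxmem_pfn = ( 896UL << 20) >> 12;	/* 229376 lowmem pages */
	unsigned long highmem    = max_pfn - maxmem_pfn;	/* 294912 pages (~1152 MiB)
								   left for ZONE_HIGHMEM */
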
++#ifndef CONFIG_XEN
 +/*
-+ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
++ * workaround for Dell systems that neglect to reserve EBDA
 + */
-+static void
-+unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++static void __init reserve_ebda_region(void)
 +{
-+	unsigned long flags;
-+	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-+	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
-+	struct phys_addr buffer = io_tlb_orig_addr[index];
++	unsigned int addr;
++	addr = get_bios_ebda();
++	if (addr)
++		reserve_bootmem(addr, PAGE_SIZE);	
++}
++#endif
 +
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++void __init setup_bootmem_allocator(void);
++static unsigned long __init setup_memory(void)
++{
 +	/*
-+	 * First, sync the memory before unmapping the entry
++	 * partially used pages are not usable - thus
++	 * we are rounding upwards:
 +	 */
-+	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
-+		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
++ 	min_low_pfn = PFN_UP(__pa(xen_start_info->pt_base)) +
++		xen_start_info->nr_pt_frames;
 +
-+	/*
-+	 * Return the buffer to the free list by setting the corresponding
-+	 * entries to indicate the number of contiguous entries available.
-+	 * While returning the entries to the free list, we merge the entries
-+	 * with slots below and above the pool being returned.
-+	 */
-+	spin_lock_irqsave(&io_tlb_lock, flags);
-+	{
-+		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
-+			 io_tlb_list[index + nslots] : 0);
-+		/*
-+		 * Step 1: return the slots to the free list, merging the
-+		 * slots with superseding slots
-+		 */
-+		for (i = index + nslots - 1; i >= index; i--)
-+			io_tlb_list[i] = ++count;
-+		/*
-+		 * Step 2: merge the returned slots with the preceding slots,
-+		 * if available (non zero)
-+		 */
-+		for (i = index - 1;
-+		     (OFFSET(i, IO_TLB_SEGSIZE) !=
-+		      IO_TLB_SEGSIZE -1) && io_tlb_list[i];
-+		     i--)
-+			io_tlb_list[i] = ++count;
++	find_max_pfn();
++
++	max_low_pfn = find_max_low_pfn();
++
++#ifdef CONFIG_HIGHMEM
++	highstart_pfn = highend_pfn = max_pfn;
++	if (max_pfn > max_low_pfn) {
++		highstart_pfn = max_low_pfn;
 +	}
-+	spin_unlock_irqrestore(&io_tlb_lock, flags);
-+}
++	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
++		pages_to_mb(highend_pfn - highstart_pfn));
++	num_physpages = highend_pfn;
++	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
++#else
++	num_physpages = max_low_pfn;
++	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
++#endif
++#ifdef CONFIG_FLATMEM
++	max_mapnr = num_physpages;
++#endif
++	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
++			pages_to_mb(max_low_pfn));
 +
-+static void
-+sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
-+{
-+	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
-+	struct phys_addr buffer = io_tlb_orig_addr[index];
-+	BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
-+	__sync_single(buffer, dma_addr, size, dir);
++	setup_bootmem_allocator();
++
++	return max_low_pfn;
 +}
 +
-+static void
-+swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
++void __init zone_sizes_init(void)
 +{
 +	/*
-+	 * Ran out of IOMMU space for this operation. This is very bad.
-+	 * Unfortunately the drivers cannot handle this operation properly
-+	 * unless they check for pci_dma_mapping_error (most don't).
-+	 * When the mapping is small enough return a static buffer to limit
-+	 * the damage, or panic when the transfer is too big.
++	 * XEN: Our notion of "DMA memory" is fake when running over Xen.
++	 * We simply put all RAM in the DMA zone so that those drivers which
++	 * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
++	 * Those drivers that *do* require lowmem are screwed anyway when
++	 * running over Xen!
 +	 */
-+	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
-+	       "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
++ 	unsigned long max_zone_pfns[MAX_NR_ZONES];
++ 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
++ 	max_zone_pfns[ZONE_DMA] = max_low_pfn;
++ 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
++#ifdef CONFIG_HIGHMEM
++	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
++	add_active_range(0, 0, highend_pfn);
++#else
++	add_active_range(0, 0, max_low_pfn);
++#endif
 +
-+	if (size > io_tlb_overflow && do_panic) {
-+		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-+			panic("PCI-DMA: Memory would be corrupted\n");
-+		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-+			panic("PCI-DMA: Random memory would be DMAed\n");
-+	}
++	free_area_init_nodes(max_zone_pfns);
 +}
++#else
++extern unsigned long __init setup_memory(void);
++extern void zone_sizes_init(void);
++#endif /* !CONFIG_NEED_MULTIPLE_NODES */
 +
-+/*
-+ * Map a single buffer of the indicated size for DMA in streaming mode.  The
-+ * PCI address to use is returned.
-+ *
-+ * Once the device is given the dma address, the device owns this memory until
-+ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
-+ */
-+dma_addr_t
-+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
++void __init setup_bootmem_allocator(void)
 +{
-+	dma_addr_t dev_addr = virt_to_bus(ptr);
-+	void *map;
-+	struct phys_addr buffer;
++	unsigned long bootmap_size;
++	/*
++	 * Initialize the boot-time allocator (with low memory only):
++	 */
++	bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
 +
-+	BUG_ON(dir == DMA_NONE);
++	register_bootmem_low_pages(max_low_pfn);
 +
 +	/*
-+	 * If the pointer passed in happens to be in the device's DMA window,
-+	 * we can safely return the device addr and not worry about bounce
-+	 * buffering it.
++	 * Reserve the bootmem bitmap itself as well. We do this in two
++	 * steps (first step was init_bootmem()) because this catches
++	 * the (very unlikely) case of us accidentally initializing the
++	 * bootmem allocator with an invalid RAM area.
 +	 */
-+	if (!range_straddles_page_boundary(ptr, size) &&
-+	    !address_needs_mapping(hwdev, dev_addr))
-+		return dev_addr;
++	reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) +
++			 bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text));
 +
++#ifndef CONFIG_XEN
 +	/*
-+	 * Oh well, have to allocate and map a bounce buffer.
++	 * reserve physical page 0 - it's a special BIOS page on many boxes,
++	 * enabling clean reboots, SMP operation, laptop functions.
 +	 */
-+	buffer.page   = virt_to_page(ptr);
-+	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
-+	map = map_single(hwdev, buffer, size, dir);
-+	if (!map) {
-+		swiotlb_full(hwdev, size, dir, 1);
-+		map = io_tlb_overflow_buffer;
-+	}
-+
-+	dev_addr = virt_to_bus(map);
-+	return dev_addr;
-+}
-+
-+/*
-+ * Unmap a single streaming mode DMA translation.  The dma_addr and size must
-+ * match what was provided for in a previous swiotlb_map_single call.  All
-+ * other usages are undefined.
-+ *
-+ * After this call, reads by the cpu to the buffer are guaranteed to see
-+ * whatever the device wrote there.
-+ */
-+void
-+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-+		     int dir)
-+{
-+	BUG_ON(dir == DMA_NONE);
-+	if (in_swiotlb_aperture(dev_addr))
-+		unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
-+
-+/*
-+ * Make physical memory consistent for a single streaming mode DMA translation
-+ * after a transfer.
-+ *
-+ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
-+ * using the cpu, yet do not wish to teardown the PCI dma mapping, you must
-+ * call this function before doing so.  At the next point you give the PCI dma
-+ * address back to the card, you must first perform a
-+ * swiotlb_dma_sync_for_device, and then the device again owns the buffer
-+ */
-+void
-+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-+			    size_t size, int dir)
-+{
-+	BUG_ON(dir == DMA_NONE);
-+	if (in_swiotlb_aperture(dev_addr))
-+		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
++	reserve_bootmem(0, PAGE_SIZE);
 +
-+void
-+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-+			       size_t size, int dir)
-+{
-+	BUG_ON(dir == DMA_NONE);
-+	if (in_swiotlb_aperture(dev_addr))
-+		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
++	/* reserve EBDA region, it's a 4K region */
++	reserve_ebda_region();
 +
-+/*
-+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
-+ * This is the scatter-gather version of the above swiotlb_map_single
-+ * interface.  Here the scatter gather list elements are each tagged with the
-+ * appropriate dma address and length.  They are obtained via
-+ * sg_dma_{address,length}(SG).
-+ *
-+ * NOTE: An implementation may be able to use a smaller number of
-+ *       DMA address/length pairs than there are SG table elements.
-+ *       (for example via virtual mapping capabilities)
-+ *       The routine returns the number of addr/length pairs actually
-+ *       used, at most nents.
-+ *
-+ * Device ownership issues as mentioned above for swiotlb_map_single are the
-+ * same here.
-+ */
-+int
-+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
-+	       int dir)
-+{
-+	struct phys_addr buffer;
-+	dma_addr_t dev_addr;
-+	char *map;
-+	int i;
++    /* could be an AMD 768MPX chipset. Reserve a page  before VGA to prevent
++       PCI prefetch into it (errata #56). Usually the page is reserved anyways,
++       unless you have no PS/2 mouse plugged in. */
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++	    boot_cpu_data.x86 == 6)
++	     reserve_bootmem(0xa0000 - 4096, 4096);
 +
-+	BUG_ON(dir == DMA_NONE);
++#ifdef CONFIG_SMP
++	/*
++	 * But first pinch a few for the stack/trampoline stuff
++	 * FIXME: Don't need the extra page at 4K, but need to fix
++	 * trampoline before removing it. (see the GDT stuff)
++	 */
++	reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
++#endif
++#ifdef CONFIG_ACPI_SLEEP
++	/*
++	 * Reserve low memory region for sleep support.
++	 */
++	acpi_reserve_bootmem();
++#endif
++#endif /* !CONFIG_XEN */
 +
-+	for (i = 0; i < nelems; i++, sg++) {
-+		dev_addr = SG_ENT_PHYS_ADDRESS(sg);
-+		if (address_needs_mapping(hwdev, dev_addr)) {
-+			buffer.page   = sg->page;
-+			buffer.offset = sg->offset;
-+			map = map_single(hwdev, buffer, sg->length, dir);
-+			if (!map) {
-+				/* Don't panic here, we expect map_sg users
-+				   to do proper error handling. */
-+				swiotlb_full(hwdev, sg->length, dir, 0);
-+				swiotlb_unmap_sg(hwdev, sg - i, i, dir);
-+				sg[0].dma_length = 0;
-+				return 0;
-+			}
-+			sg->dma_address = (dma_addr_t)virt_to_bus(map);
-+		} else
-+			sg->dma_address = dev_addr;
-+		sg->dma_length = sg->length;
++#ifdef CONFIG_BLK_DEV_INITRD
++	if (xen_start_info->mod_start) {
++		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
++			/*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
++			initrd_start = INITRD_START + PAGE_OFFSET;
++			initrd_end = initrd_start+INITRD_SIZE;
++			initrd_below_start_ok = 1;
++		}
++		else {
++			printk(KERN_ERR "initrd extends beyond end of memory "
++			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++			    INITRD_START + INITRD_SIZE,
++			    max_low_pfn << PAGE_SHIFT);
++			initrd_start = 0;
++		}
 +	}
-+	return nelems;
++#endif
++#ifdef CONFIG_KEXEC
++	if (crashk_res.start != crashk_res.end)
++		reserve_bootmem(crashk_res.start,
++			crashk_res.end - crashk_res.start + 1);
++#endif
++
++	if (!xen_feature(XENFEAT_auto_translated_physmap))
++		phys_to_machine_mapping =
++			(unsigned long *)xen_start_info->mfn_list;
 +}
 +
 +/*
-+ * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
-+ * concerning calls here are the same as for swiotlb_unmap_single() above.
++ * The node 0 pgdat is initialized before all of these because
++ * it's needed for bootmem.  node>0 pgdats have their virtual
++ * space allocated before the pagetables are in place to access
++ * them, so they can't be cleared then.
++ *
++ * This should all compile down to nothing when NUMA is off.
 + */
-+void
-+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
-+		 int dir)
++void __init remapped_pgdat_init(void)
 +{
-+	int i;
++	int nid;
 +
-+	BUG_ON(dir == DMA_NONE);
++	for_each_online_node(nid) {
++		if (nid != 0)
++			memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
++	}
++}
 +
-+	for (i = 0; i < nelems; i++, sg++)
-+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-+			unmap_single(hwdev, 
-+				     (void *)bus_to_virt(sg->dma_address),
-+				     sg->dma_length, dir);
++#ifdef CONFIG_MCA
++static void set_mca_bus(int x)
++{
++	MCA_bus = x;
++}
++#else
++static void set_mca_bus(int x) { }
++#endif
++
++/* Overridden in paravirt.c if CONFIG_PARAVIRT */
++char * __init __attribute__((weak)) memory_setup(void)
++{
++	return machine_specific_memory_setup();
 +}
 +
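
The weak attribute is what makes the paravirt override work: the
definition above is only a default, and any object file that provides a
strong memory_setup() wins at link time with no #ifdef needed. The
pattern in isolation (platform_hook is a hypothetical name):

	/* default, used when no other file defines platform_hook */
	int __attribute__((weak)) platform_hook(void)
	{
		return 0;
	}

	/* a strong definition elsewhere silently replaces the weak one:
	 * int platform_hook(void) { return 1; } */
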
 +/*
-+ * Make physical memory consistent for a set of streaming mode DMA translations
-+ * after a transfer.
-+ *
-+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
-+ * and usage.
++ * Determine if we were loaded by an EFI loader.  If so, then we have also been
++ * passed the efi memmap, systab, etc., so we should use these data structures
++ * for initialization.  Note, the efi init code path is determined by the
++ * global efi_enabled. This allows the same kernel image to be used on existing
++ * systems (with a traditional BIOS) as well as on EFI systems.
 + */
-+void
-+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-+			int nelems, int dir)
++void __init setup_arch(char **cmdline_p)
 +{
-+	int i;
++	int i, j, k, fpp;
++	struct physdev_set_iopl set_iopl;
++	unsigned long max_low_pfn;
 +
-+	BUG_ON(dir == DMA_NONE);
++	/* Force a quick death if the kernel panics (not domain 0). */
++	extern int panic_timeout;
++	if (!panic_timeout && !is_initial_xendomain())
++		panic_timeout = 1;
 +
-+	for (i = 0; i < nelems; i++, sg++)
-+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-+			sync_single(hwdev,
-+				    (void *)bus_to_virt(sg->dma_address),
-+				    sg->dma_length, dir);
-+}
++	/* Register a call for panic conditions. */
++	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
 +
-+void
-+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-+			   int nelems, int dir)
-+{
-+	int i;
++	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
++	HYPERVISOR_vm_assist(VMASST_CMD_enable,
++			     VMASST_TYPE_writable_pagetables);
 +
-+	BUG_ON(dir == DMA_NONE);
++	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
++	pre_setup_arch_hook();
++	early_cpu_init();
 +
-+	for (i = 0; i < nelems; i++, sg++)
-+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-+			sync_single(hwdev,
-+				    (void *)bus_to_virt(sg->dma_address),
-+				    sg->dma_length, dir);
-+}
++	/*
++	 * FIXME: This isn't an official loader_type right
++	 * now but does currently work with elilo.
++	 * If we were configured as an EFI kernel, check to make
++	 * sure that we were loaded correctly from elilo and that
++	 * the system table is valid.  If not, then initialize normally.
++	 */
++#ifdef CONFIG_EFI
++	if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
++		efi_enabled = 1;
++#endif
 +
-+dma_addr_t
-+swiotlb_map_page(struct device *hwdev, struct page *page,
-+		 unsigned long offset, size_t size,
-+		 enum dma_data_direction direction)
-+{
-+	struct phys_addr buffer;
-+	dma_addr_t dev_addr;
-+	char *map;
++	/* This must be initialized to UNNAMED_MAJOR for ipconfig to work
++	   properly.  Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
++	*/
++	ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
++ 	drive_info = DRIVE_INFO;
++ 	screen_info = SCREEN_INFO;
++	edid_info = EDID_INFO;
++	apm_info.bios = APM_BIOS_INFO;
++	ist_info = IST_INFO;
++	saved_videomode = VIDEO_MODE;
++	if( SYS_DESC_TABLE.length != 0 ) {
++		set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
++		machine_id = SYS_DESC_TABLE.table[0];
++		machine_submodel_id = SYS_DESC_TABLE.table[1];
++		BIOS_revision = SYS_DESC_TABLE.table[2];
++	}
++	bootloader_type = LOADER_TYPE;
 +
-+	dev_addr = page_to_bus(page) + offset;
-+	if (address_needs_mapping(hwdev, dev_addr)) {
-+		buffer.page   = page;
-+		buffer.offset = offset;
-+		map = map_single(hwdev, buffer, size, direction);
-+		if (!map) {
-+			swiotlb_full(hwdev, size, direction, 1);
-+			map = io_tlb_overflow_buffer;
++	if (is_initial_xendomain()) {
++		/* This is drawn from a dump from vgacon:startup in
++		 * standard Linux. */
++		screen_info.orig_video_mode = 3; 
++		screen_info.orig_video_isVGA = 1;
++		screen_info.orig_video_lines = 25;
++		screen_info.orig_video_cols = 80;
++		screen_info.orig_video_ega_bx = 3;
++		screen_info.orig_video_points = 16;
++		screen_info.orig_y = screen_info.orig_video_lines - 1;
++		if (xen_start_info->console.dom0.info_size >=
++		    sizeof(struct dom0_vga_console_info)) {
++			const struct dom0_vga_console_info *info =
++				(struct dom0_vga_console_info *)(
++					(char *)xen_start_info +
++					xen_start_info->console.dom0.info_off);
++			dom0_init_screen_info(info);
 +		}
-+		dev_addr = (dma_addr_t)virt_to_bus(map);
-+	}
++		xen_start_info->console.domU.mfn = 0;
++		xen_start_info->console.domU.evtchn = 0;
++	} else
++		screen_info.orig_video_isVGA = 0;
 +
-+	return dev_addr;
-+}
++#ifdef CONFIG_BLK_DEV_RAM
++	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++#endif
 +
-+void
-+swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
-+		   size_t size, enum dma_data_direction direction)
-+{
-+	BUG_ON(direction == DMA_NONE);
-+	if (in_swiotlb_aperture(dma_address))
-+		unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
-+}
++	setup_xen_features();
 +
-+int
-+swiotlb_dma_mapping_error(dma_addr_t dma_addr)
-+{
-+	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
-+}
++	ARCH_SETUP
++	if (efi_enabled)
++		efi_init();
++	else {
++		printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++		print_memory_map(memory_setup());
++	}
 +
-+/*
-+ * Return whether the given PCI device DMA address mask can be supported
-+ * properly.  For example, if your device can only drive the low 24-bits
-+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
-+ * this function.
-+ */
-+int
-+swiotlb_dma_supported (struct device *hwdev, u64 mask)
-+{
-+	return (mask >= ((1UL << IO_TLB_DMA_BITS) - 1));
-+}
++	copy_edd();
 +
-+EXPORT_SYMBOL(swiotlb_init);
-+EXPORT_SYMBOL(swiotlb_map_single);
-+EXPORT_SYMBOL(swiotlb_unmap_single);
-+EXPORT_SYMBOL(swiotlb_map_sg);
-+EXPORT_SYMBOL(swiotlb_unmap_sg);
-+EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
-+EXPORT_SYMBOL(swiotlb_sync_single_for_device);
-+EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
-+EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
-+EXPORT_SYMBOL(swiotlb_map_page);
-+EXPORT_SYMBOL(swiotlb_unmap_page);
-+EXPORT_SYMBOL(swiotlb_dma_mapping_error);
-+EXPORT_SYMBOL(swiotlb_dma_supported);
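
Drivers never call map_single() and friends above directly; they sit
behind the generic DMA API, which bounces any buffer the device cannot
address into the aperture and copies it back on unmap or sync. A
driver's-eye sketch against the 2.6.18-era API (example_tx is a
hypothetical function, 'dev' stands for a real struct device):

	static int example_tx(struct device *dev)
	{
		void *buf = kmalloc(1024, GFP_KERNEL);
		dma_addr_t bus;

		if (!buf)
			return -ENOMEM;
		bus = dma_map_single(dev, buf, 1024, DMA_TO_DEVICE);	/* may bounce */
		if (dma_mapping_error(bus)) {	/* catches the overflow buffer above */
			kfree(buf);
			return -EIO;
		}
		/* ... hand 'bus' to the device and run the transfer ... */
		dma_unmap_single(dev, bus, 1024, DMA_TO_DEVICE);	/* sync and release */
		kfree(buf);
		return 0;
	}
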
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/sysenter.c linux-2.6.18-xen/arch/i386/kernel/sysenter.c
---- linux-2.6.18.3/arch/i386/kernel/sysenter.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/kernel/sysenter.c	2006-11-19 14:26:22.000000000 +0100
-@@ -23,6 +23,10 @@
- #include <asm/pgtable.h>
- #include <asm/unistd.h>
- 
-+#ifdef CONFIG_XEN
-+#include <xen/interface/callback.h>
-+#endif
++	if (!MOUNT_ROOT_RDONLY)
++		root_mountflags &= ~MS_RDONLY;
++	init_mm.start_code = (unsigned long) _text;
++	init_mm.end_code = (unsigned long) _etext;
++	init_mm.end_data = (unsigned long) _edata;
++	init_mm.brk = (PFN_UP(__pa(xen_start_info->pt_base)) +
++		       xen_start_info->nr_pt_frames) << PAGE_SHIFT;
 +
- /*
-  * Should the kernel map a VDSO page into processes and pass its
-  * address down to glibc upon exec()?
-@@ -44,6 +48,7 @@
- 
- void enable_sep_cpu(void)
- {
-+#ifndef CONFIG_X86_NO_TSS
- 	int cpu = get_cpu();
- 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
- 
-@@ -58,6 +63,7 @@
- 	wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
- 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
- 	put_cpu();	
-+#endif
- }
- 
- /*
-@@ -72,6 +78,18 @@
- {
- 	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
- 
-+#ifdef CONFIG_XEN
-+	if (boot_cpu_has(X86_FEATURE_SEP)) {
-+		struct callback_register sysenter = {
-+			.type = CALLBACKTYPE_sysenter,
-+			.address = { __KERNEL_CS, (unsigned long)sysenter_entry },
-+		};
++	code_resource.start = virt_to_phys(_text);
++	code_resource.end = virt_to_phys(_etext)-1;
++	data_resource.start = virt_to_phys(_etext);
++	data_resource.end = virt_to_phys(_edata)-1;
 +
-+		if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0)
-+			clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
++	parse_early_param();
++
++	if (user_defined_memmap) {
++		printk(KERN_INFO "user-defined physical RAM map:\n");
++		print_memory_map("user");
 +	}
++
++	strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
++	*cmdline_p = command_line;
++
++	max_low_pfn = setup_memory();
++
++	/*
++	 * NOTE: before this point _nobody_ is allowed to allocate
++	 * any memory using the bootmem allocator.  Although the
++	 * allocator is now initialised, only the first 8MB of the kernel
++	 * virtual address space has been mapped.  All allocations before
++	 * paging_init() has completed must use the alloc_bootmem_low_pages()
++	 * variant (which allocates DMA'able memory) and care must be taken
++	 * not to exceed the 8MB limit.
++	 */
++
++#ifdef CONFIG_SMP
++	smp_alloc_memory(); /* AP processor realmode stacks in low memory */
 +#endif
++	paging_init();
++	remapped_pgdat_init();
++	sparse_init();
++	zone_sizes_init();
 +
- #ifdef CONFIG_COMPAT_VDSO
- 	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
- 	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
-@@ -79,8 +97,12 @@
- 	/*
- 	 * In the non-compat case the ELF coredumping code needs the fixmap:
- 	 */
-+#ifdef CONFIG_XEN
-+	__set_fixmap(FIX_VDSO, virt_to_machine(syscall_page), PAGE_KERNEL_RO);
-+#else
- 	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
- #endif
++#ifdef CONFIG_X86_FIND_SMP_CONFIG
++	/*
++	 * Find and reserve possible boot-time SMP configuration:
++	 */
++	find_smp_config();
 +#endif
- 
- 	if (!boot_cpu_has(X86_FEATURE_SEP)) {
- 		memcpy(syscall_page,
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/time-xen.c linux-2.6.18-xen/arch/i386/kernel/time-xen.c
---- linux-2.6.18.3/arch/i386/kernel/time-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/time-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,1101 @@
-+/*
-+ *  linux/arch/i386/kernel/time.c
-+ *
-+ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
-+ *
-+ * This file contains the PC-specific time handling details:
-+ * reading the RTC at bootup, etc..
-+ * 1994-07-02    Alan Modra
-+ *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
-+ * 1995-03-26    Markus Kuhn
-+ *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
-+ *      precision CMOS clock update
-+ * 1996-05-03    Ingo Molnar
-+ *      fixed time warps in do_[slow|fast]_gettimeoffset()
-+ * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
-+ *		"A Kernel Model for Precision Timekeeping" by Dave Mills
-+ * 1998-09-05    (Various)
-+ *	More robust do_fast_gettimeoffset() algorithm implemented
-+ *	(works with APM, Cyrix 6x86MX and Centaur C6),
-+ *	monotonic gettimeofday() with fast_get_timeoffset(),
-+ *	drift-proof precision TSC calibration on boot
-+ *	(C. Scott Ananian <cananian at alumni.princeton.edu>, Andrew D.
-+ *	Balsa <andrebalsa at altern.org>, Philip Gladstone <philip at raptor.com>;
-+ *	ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause at tu-harburg.de>).
-+ * 1998-12-16    Andrea Arcangeli
-+ *	Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
-+ *	because was not accounting lost_ticks.
-+ * 1998-12-24 Copyright (C) 1998  Andrea Arcangeli
-+ *	Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
-+ *	serialize accesses to xtime/lost_ticks).
-+ */
++	numa_kva_reserve();
 +
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/param.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/interrupt.h>
-+#include <linux/time.h>
-+#include <linux/delay.h>
-+#include <linux/init.h>
-+#include <linux/smp.h>
-+#include <linux/module.h>
-+#include <linux/sysdev.h>
-+#include <linux/bcd.h>
-+#include <linux/efi.h>
-+#include <linux/mca.h>
-+#include <linux/sysctl.h>
-+#include <linux/percpu.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/posix-timers.h>
++	/* Make sure we have a correctly sized P->M table. */
++	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++		phys_to_machine_mapping = alloc_bootmem_low_pages(
++		     max_pfn * sizeof(unsigned long));
++		memset(phys_to_machine_mapping, ~0,
++		       max_pfn * sizeof(unsigned long));
++		memcpy(phys_to_machine_mapping,
++		       (unsigned long *)xen_start_info->mfn_list,
++		       xen_start_info->nr_pages * sizeof(unsigned long));
++		free_bootmem(
++		     __pa(xen_start_info->mfn_list),
++		     PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++				     sizeof(unsigned long))));
 +
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/irq.h>
-+#include <asm/msr.h>
-+#include <asm/delay.h>
-+#include <asm/mpspec.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+#include <asm/timer.h>
-+#include <asm/sections.h>
++		/*
++		 * Initialise the list of frames that holds the lists of
++		 * frames making up the p2m table. Used by save/restore.
++		 */
++		pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
++		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++		     virt_to_mfn(pfn_to_mfn_frame_list_list);
 +
-+#include "mach_time.h"
++		fpp = PAGE_SIZE/sizeof(unsigned long);
++		for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
++			if ((j % fpp) == 0) {
++				k++;
++				BUG_ON(k >= 16);
++				pfn_to_mfn_frame_list[k] =
++					alloc_bootmem_low_pages(PAGE_SIZE);
++				pfn_to_mfn_frame_list_list[k] =
++					virt_to_mfn(pfn_to_mfn_frame_list[k]);
++				j = 0;
++			}
++			pfn_to_mfn_frame_list[k][j] =
++				virt_to_mfn(&phys_to_machine_mapping[i]);
++		}
++		HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
++	}
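++
++	/*
++	 * Rough arithmetic for the two-level structure above (illustrative,
++	 * assuming 4 KiB pages and 4-byte entries on i386): fpp = 1024, so
++	 * each p2m frame maps 1024 pfns (4 MiB), each frame-list page
++	 * indexes 1024 p2m frames (4 GiB), and the 16-entry list-of-lists
++	 * therefore covers up to 64 GiB of guest memory.
++	 */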
 +
-+#include <linux/timex.h>
++	/*
++	 * NOTE: at this point the bootmem allocator is fully available.
++	 */
 +
-+#include <asm/hpet.h>
++	if (is_initial_xendomain())
++		dmi_scan_machine();
 +
-+#include <asm/arch_hooks.h>
++#ifdef CONFIG_X86_GENERICARCH
++	generic_apic_probe();
++#endif
++	if (efi_enabled)
++		efi_map_memmap();
 +
-+#include <xen/evtchn.h>
-+#include <xen/interface/vcpu.h>
++	set_iopl.iopl = 1;
++	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
 +
-+#if defined (__i386__)
-+#include <asm/i8259.h>
++#ifdef CONFIG_ACPI
++	if (!is_initial_xendomain()) {
++		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
++		acpi_disabled = 1;
++		acpi_ht = 0;
++	}
++
++	/*
++	 * Parse the ACPI tables for possible boot-time SMP configuration.
++	 */
++	acpi_boot_table_init();
 +#endif
 +
-+int pit_latch_buggy;              /* extern */
++#ifdef CONFIG_PCI
++#ifdef CONFIG_X86_IO_APIC
++	check_acpi_pci();	/* Checks more than just ACPI actually */
++#endif
++#endif
 +
-+#if defined(__x86_64__)
-+unsigned long vxtime_hz = PIT_TICK_RATE;
-+struct vxtime_data __vxtime __section_vxtime;   /* for vsyscalls */
-+volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
-+unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
-+struct timespec __xtime __section_xtime;
-+struct timezone __sys_tz __section_sys_tz;
++#ifdef CONFIG_ACPI
++	acpi_boot_init();
++
++#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
++	if (def_to_bigsmp)
++		printk(KERN_WARNING "More than 8 CPUs detected and "
++			"CONFIG_X86_PC cannot handle it.\nUse "
++			"CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
++#endif
++#endif
++#ifdef CONFIG_X86_LOCAL_APIC
++	if (smp_found_config)
++		get_smp_config();
++#endif
++#if defined(CONFIG_XEN) && defined(CONFIG_SMP)
++	prefill_possible_map();
 +#endif
 +
-+#define USEC_PER_TICK (USEC_PER_SEC / HZ)
-+#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
-+#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
++	e820_register_memory();
 +
-+#define NS_SCALE	10 /* 2^10, carefully chosen */
-+#define US_SCALE	32 /* 2^32, arbitrarily chosen */
++	if (is_initial_xendomain()) {
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++		if (!efi_enabled ||
++		    (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
++			conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++		conswitchp = &dummy_con;
++#endif
++#endif
++	} else {
++#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
++		conswitchp = &dummy_con;
++#endif
++	}
++#ifdef CONFIG_X86_TSC
++	tsc_init();
++#endif
++}
 +
-+unsigned int cpu_khz;	/* Detected as we calibrate the TSC */
-+EXPORT_SYMBOL(cpu_khz);
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++	HYPERVISOR_shutdown(SHUTDOWN_crash);
++	/* we're never actually going to get here... */
++	return NOTIFY_DONE;
++}
 +
-+extern unsigned long wall_jiffies;
++static __init int add_pcspkr(void)
++{
++	struct platform_device *pd;
++	int ret;
 +
-+DEFINE_SPINLOCK(rtc_lock);
-+EXPORT_SYMBOL(rtc_lock);
++	pd = platform_device_alloc("pcspkr", -1);
++	if (!pd)
++		return -ENOMEM;
 +
-+extern struct init_timer_opts timer_tsc_init;
-+extern struct timer_opts timer_tsc;
-+#define timer_none timer_tsc
++	ret = platform_device_add(pd);
++	if (ret)
++		platform_device_put(pd);
 +
-+/* These are periodically updated in shared_info, and then copied here. */
-+struct shadow_time_info {
-+	u64 tsc_timestamp;     /* TSC at last update of time vals.  */
-+	u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
-+	u32 tsc_to_nsec_mul;
-+	u32 tsc_to_usec_mul;
-+	int tsc_shift;
-+	u32 version;
-+};
-+static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
-+static struct timespec shadow_tv;
-+static u32 shadow_tv_version;
++	return ret;
++}
++device_initcall(add_pcspkr);
++
++/*
++ * Local Variables:
++ * mode:c
++ * c-file-style:"k&r"
++ * c-basic-offset:8
++ * End:
++ */
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/smp-xen.c b/arch/i386/kernel/smp-xen.c
+--- a/arch/i386/kernel/smp-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/smp-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,624 @@
++/*
++ *	Intel SMP support routines.
++ *
++ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
++ *	(c) 1998-99, 2000 Ingo Molnar <mingo at redhat.com>
++ *
++ *	This code is released under the GNU General Public License version 2 or
++ *	later.
++ */
 +
-+/* Keep track of last time we did processing/updating of jiffies and xtime. */
-+static u64 processed_system_time;   /* System time (ns) at last processing. */
-+static DEFINE_PER_CPU(u64, processed_system_time);
++#include <linux/init.h>
 +
-+/* How much CPU time was spent blocked and how much was 'stolen'? */
-+static DEFINE_PER_CPU(u64, processed_stolen_time);
-+static DEFINE_PER_CPU(u64, processed_blocked_time);
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/cache.h>
++#include <linux/interrupt.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
 +
-+/* Current runstate of each CPU (updated automatically by the hypervisor). */
-+static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
++#include <asm/mtrr.h>
++#include <asm/tlbflush.h>
++#if 0
++#include <mach_apic.h>
++#endif
++#include <xen/evtchn.h>
 +
-+/* Must be signed, as it's compared with s64 quantities which can be -ve. */
-+#define NS_PER_TICK (1000000000LL/HZ)
++/*
++ *	Some notes on x86 processor bugs affecting SMP operation:
++ *
++ *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
++ *	The Linux implications for SMP are handled as follows:
++ *
++ *	Pentium III / [Xeon]
++ *		None of the E1AP-E3AP errata are visible to the user.
++ *
++ *	E1AP.	see PII A1AP
++ *	E2AP.	see PII A2AP
++ *	E3AP.	see PII A3AP
++ *
++ *	Pentium II / [Xeon]
++ *		None of the A1AP-A3AP errata are visible to the user.
++ *
++ *	A1AP.	see PPro 1AP
++ *	A2AP.	see PPro 2AP
++ *	A3AP.	see PPro 7AP
++ *
++ *	Pentium Pro
++ *		None of 1AP-9AP errata are visible to the normal user,
++ *	except occasional delivery of 'spurious interrupt' as trap #15.
++ *	This is very rare and a non-problem.
++ *
++ *	1AP.	Linux maps APIC as non-cacheable
++ *	2AP.	worked around in hardware
++ *	3AP.	fixed in C0 and above steppings microcode update.
++ *		Linux does not use excessive STARTUP_IPIs.
++ *	4AP.	worked around in hardware
++ *	5AP.	symmetric IO mode (normal Linux operation) not affected.
++ *		'noapic' mode has vector 0xf filled out properly.
++ *	6AP.	'noapic' mode might be affected - fixed in later steppings
++ *	7AP.	We do not assume writes to the LVT deasserting IRQs
++ *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
++ *	9AP.	We do not use mixed mode
++ *
++ *	Pentium
++ *		There is a marginal case where REP MOVS on 100MHz SMP
++ *	machines with B stepping processors can fail. XXX should provide
++ *	an L1cache=Writethrough or L1cache=off option.
++ *
++ *		B stepping CPUs may hang. There are hardware workarounds
++ *	for this. We warn about it in case your board doesn't have the
++ *	workarounds. Basically that's so I can tell anyone with a B stepping
++ *	CPU and SMP problems "tough".
++ *
++ *	Specific items [From Pentium Processor Specification Update]
++ *
++ *	1AP.	Linux doesn't use remote read
++ *	2AP.	Linux doesn't trust APIC errors
++ *	3AP.	We work around this
++ *	4AP.	Linux never generated 3 interrupts of the same priority
++ *		to cause a lost local interrupt.
++ *	5AP.	Remote read is never used
++ *	6AP.	not affected - worked around in hardware
++ *	7AP.	not affected - worked around in hardware
++ *	8AP.	worked around in hardware - we get explicit CS errors if not
++ *	9AP.	only 'noapic' mode affected. Might generate spurious
++ *		interrupts, we log only the first one and count the
++ *		rest silently.
++ *	10AP.	not affected - worked around in hardware
++ *	11AP.	Linux reads the APIC between writes to avoid this, as per
++ *		the documentation. Make sure you preserve this as it affects
++ *		the C stepping chips too.
++ *	12AP.	not affected - worked around in hardware
++ *	13AP.	not affected - worked around in hardware
++ *	14AP.	we always deassert INIT during bootup
++ *	15AP.	not affected - worked around in hardware
++ *	16AP.	not affected - worked around in hardware
++ *	17AP.	not affected - worked around in hardware
++ *	18AP.	not affected - worked around in hardware
++ *	19AP.	not affected - worked around in BIOS
++ *
++ *	If this sounds worrying, believe me, these bugs are either ___RARE___,
++ *	or are signal timing bugs worked around in hardware and there's
++ *	next to nothing of note with C stepping upwards.
++ */
 +
-+static inline void __normalize_time(time_t *sec, s64 *nsec)
++DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
++
++/*
++ * the following functions deal with sending IPIs between CPUs.
++ *
++ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
++ */
++
++static inline int __prepare_ICR (unsigned int shortcut, int vector)
 +{
-+	while (*nsec >= NSEC_PER_SEC) {
-+		(*nsec) -= NSEC_PER_SEC;
-+		(*sec)++;
-+	}
-+	while (*nsec < 0) {
-+		(*nsec) += NSEC_PER_SEC;
-+		(*sec)--;
++	unsigned int icr = shortcut | APIC_DEST_LOGICAL;
++
++	switch (vector) {
++	default:
++		icr |= APIC_DM_FIXED | vector;
++		break;
++	case NMI_VECTOR:
++		icr |= APIC_DM_NMI;
++		break;
 +	}
++	return icr;
 +}
 +
-+/* Does this guest OS track Xen time, or set its wall clock independently? */
-+static int independent_wallclock = 0;
-+static int __init __independent_wallclock(char *str)
++static inline int __prepare_ICR2 (unsigned int mask)
 +{
-+	independent_wallclock = 1;
-+	return 1;
++	return SET_APIC_DEST_FIELD(mask);
 +}
-+__setup("independent_wallclock", __independent_wallclock);
 +
-+/* Permitted clock jitter, in nsecs, beyond which a warning will be printed. */
-+static unsigned long permitted_clock_jitter = 10000000UL; /* 10ms */
-+static int __init __permitted_clock_jitter(char *str)
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++static inline void __send_IPI_one(unsigned int cpu, int vector)
 +{
-+	permitted_clock_jitter = simple_strtoul(str, NULL, 0);
-+	return 1;
++	int irq = per_cpu(ipi_to_irq, cpu)[vector];
++	BUG_ON(irq < 0);
++	notify_remote_via_irq(irq);
 +}
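++
++/*
++ * A note on the mechanism (an inference from the code above, not from
++ * the changelog): with no virtual local APIC, a Xen IPI is simply a
++ * notification on the event channel that was bound for (cpu, vector)
++ * at bring-up and recorded in per_cpu(ipi_to_irq, cpu); it arrives as
++ * an ordinary event-channel interrupt rather than through the ICR.
++ */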
-+__setup("permitted_clock_jitter=", __permitted_clock_jitter);
 +
-+#ifndef CONFIG_X86
-+int tsc_disable __devinitdata = 0;
-+#endif
-+
-+static void delay_tsc(unsigned long loops)
++void __send_IPI_shortcut(unsigned int shortcut, int vector)
 +{
-+	unsigned long bclock, now;
++	int cpu;
 +
-+	rdtscl(bclock);
-+	do {
-+		rep_nop();
-+		rdtscl(now);
-+	} while ((now - bclock) < loops);
++	switch (shortcut) {
++	case APIC_DEST_SELF:
++		__send_IPI_one(smp_processor_id(), vector);
++		break;
++	case APIC_DEST_ALLBUT:
++		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++			if (cpu == smp_processor_id())
++				continue;
++			if (cpu_isset(cpu, cpu_online_map)) {
++				__send_IPI_one(cpu, vector);
++			}
++		}
++		break;
++	default:
++		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
++		       vector);
++		break;
++	}
 +}
 +
-+struct timer_opts timer_tsc = {
-+	.name = "tsc",
-+	.delay = delay_tsc,
-+};
++void fastcall send_IPI_self(int vector)
++{
++	__send_IPI_shortcut(APIC_DEST_SELF, vector);
++}
 +
 +/*
-+ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
-+ * yielding a 64-bit result.
++ * This is only used on smaller machines.
 + */
-+static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
++void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
 +{
-+	u64 product;
-+#ifdef __i386__
-+	u32 tmp1, tmp2;
-+#endif
++	unsigned long mask = cpus_addr(cpumask)[0];
++	unsigned long flags;
++	unsigned int cpu;
 +
-+	if (shift < 0)
-+		delta >>= -shift;
-+	else
-+		delta <<= shift;
++	local_irq_save(flags);
++	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
 +
-+#ifdef __i386__
-+	__asm__ (
-+		"mul  %5       ; "
-+		"mov  %4,%%eax ; "
-+		"mov  %%edx,%4 ; "
-+		"mul  %5       ; "
-+		"xor  %5,%5    ; "
-+		"add  %4,%%eax ; "
-+		"adc  %5,%%edx ; "
-+		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
-+		: "a" ((u32)delta), "1" ((u32)(delta >> US_SCALE)), "2" (mul_frac) );
-+#else
-+	__asm__ (
-+		"mul %%rdx ; shrd $32,%%rdx,%%rax"
-+		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-+#endif
++	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++		if (cpu_isset(cpu, cpumask)) {
++			__send_IPI_one(cpu, vector);
++		}
++	}
 +
-+	return product;
++	local_irq_restore(flags);
 +}
 +
-+#if defined (__i386__)
-+int read_current_timer(unsigned long *timer_val)
++void send_IPI_mask_sequence(cpumask_t mask, int vector)
 +{
-+	rdtscl(*timer_val);
-+	return 0;
-+}
-+#endif
 +
-+void init_cpu_khz(void)
-+{
-+	u64 __cpu_khz = 1000000ULL << US_SCALE;
-+	struct vcpu_time_info *info;
-+	info = &HYPERVISOR_shared_info->vcpu_info[0].time;
-+	do_div(__cpu_khz, info->tsc_to_system_mul);
-+	if (info->tsc_shift < 0)
-+		cpu_khz = __cpu_khz << -info->tsc_shift;
-+	else
-+		cpu_khz = __cpu_khz >> info->tsc_shift;
++	send_IPI_mask_bitmask(mask, vector);
 +}
 +
-+static u64 get_nsec_offset(struct shadow_time_info *shadow)
++#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
++
++#if 0 /* XEN */
++/*
++ *	Smarter SMP flushing macros. 
++ *		c/o Linus Torvalds.
++ *
++ *	These mean you can really definitely utterly forget about
++ *	writing to user space from interrupts. (It's not allowed anyway).
++ *
++ *	Optimizations Manfred Spraul <manfred at colorfullife.com>
++ */
++
++static cpumask_t flush_cpumask;
++static struct mm_struct * flush_mm;
++static unsigned long flush_va;
++static DEFINE_SPINLOCK(tlbstate_lock);
++#define FLUSH_ALL	0xffffffff
++
++/*
++ * We cannot call mmdrop() because we are in interrupt context, 
++ * instead update mm->cpu_vm_mask.
++ *
++ * We need to reload %cr3 since the page tables may be going
++ * away from under us..
++ */
++static inline void leave_mm (unsigned long cpu)
 +{
-+	u64 now, delta;
-+	rdtscll(now);
-+	delta = now - shadow->tsc_timestamp;
-+	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
++	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
++		BUG();
++	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
++	load_cr3(swapper_pg_dir);
 +}
 +
-+static unsigned long get_usec_offset(struct shadow_time_info *shadow)
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
++ * 	Stop ipi delivery for the old mm. This is not synchronized with
++ * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ * 	for the wrong mm, and in the worst case we perform a superfluous
++ * 	tlb flush.
++ * 1a2) set cpu_tlbstate to TLBSTATE_OK
++ * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ *	was in lazy tlb mode.
++ * 1a3) update cpu_tlbstate[].active_mm
++ * 	Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
++ * 	Now the other cpus will send tlb flush ipis.
++ * 1a5) change cr3.
++ * 1b) thread switch without mm change
++ *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
++ *	flush ipis.
++ * 1b1) set cpu_tlbstate to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * 	Atomically set the bit [other cpus will start sending flush ipis],
++ * 	and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%esp, ie current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ *   runs in kernel space, the cpu could load tlb entries for user space
++ *   pages.
++ *
++ * The good news is that cpu_tlbstate is local to each cpu, no
++ * write/read ordering problems.
++ */
++
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ */
++
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
++				     struct pt_regs *regs)
 +{
-+	u64 now, delta;
-+	rdtscll(now);
-+	delta = now - shadow->tsc_timestamp;
-+	return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
++	unsigned long cpu;
++
++	cpu = get_cpu();
++
++	if (!cpu_isset(cpu, flush_cpumask))
++		goto out;
++		/* 
++		 * This was a BUG() but until someone can quote me the
++		 * line from the intel manual that guarantees an IPI to
++		 * multiple CPUs is retried _only_ on the erroring CPUs
++		 * it's staying as a return
++		 *
++		 * BUG();
++		 */
++		 
++	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
++		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
++			if (flush_va == FLUSH_ALL)
++				local_flush_tlb();
++			else
++				__flush_tlb_one(flush_va);
++		} else
++			leave_mm(cpu);
++	}
++	smp_mb__before_clear_bit();
++	cpu_clear(cpu, flush_cpumask);
++	smp_mb__after_clear_bit();
++out:
++	put_cpu_no_resched();
++
++	return IRQ_HANDLED;
 +}
 +
-+static void __update_wallclock(time_t sec, long nsec)
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++						unsigned long va)
 +{
-+	long wtm_nsec, xtime_nsec;
-+	time_t wtm_sec, xtime_sec;
-+	u64 tmp, wc_nsec;
++	/*
++	 * A couple of (to be removed) sanity checks:
++	 *
++	 * - current CPU must not be in mask
++	 * - mask must exist :)
++	 */
++	BUG_ON(cpus_empty(cpumask));
++	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
++	BUG_ON(!mm);
 +
-+	/* Adjust wall-clock time base based on wall_jiffies ticks. */
-+	wc_nsec = processed_system_time;
-+	wc_nsec += sec * (u64)NSEC_PER_SEC;
-+	wc_nsec += nsec;
-+	wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
++	/* If a CPU which we ran on has gone down, OK. */
++	cpus_and(cpumask, cpumask, cpu_online_map);
++	if (cpus_empty(cpumask))
++		return;
 +
-+	/* Split wallclock base into seconds and nanoseconds. */
-+	tmp = wc_nsec;
-+	xtime_nsec = do_div(tmp, 1000000000);
-+	xtime_sec  = (time_t)tmp;
++	/*
++	 * I'm not happy about this global shared spinlock in the
++	 * MM hot path, but we'll see how contended it is.
++	 * Temporarily this turns IRQs off, so that lockups are
++	 * detected by the NMI watchdog.
++	 */
++	spin_lock(&tlbstate_lock);
++	
++	flush_mm = mm;
++	flush_va = va;
++#if NR_CPUS <= BITS_PER_LONG
++	atomic_set_mask(cpumask, &flush_cpumask);
++#else
++	{
++		int k;
++		unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
++		unsigned long *cpu_mask = (unsigned long *)&cpumask;
++		for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
++			atomic_set_mask(cpu_mask[k], &flush_mask[k]);
++	}
++#endif
++	/*
++	 * We have to send the IPI only to
++	 * CPUs affected.
++	 */
++	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
 +
-+	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
-+	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
++	while (!cpus_empty(flush_cpumask))
++		/* nothing. lockup detection does not belong here */
++		mb();
 +
-+	set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
-+	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++	flush_mm = NULL;
++	flush_va = 0;
++	spin_unlock(&tlbstate_lock);
++}
++	
++void flush_tlb_current_task(void)
++{
++	struct mm_struct *mm = current->mm;
++	cpumask_t cpu_mask;
 +
-+	ntp_clear();
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
++
++	local_flush_tlb();
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++	preempt_enable();
 +}
 +
-+static void update_wallclock(void)
++void flush_tlb_mm (struct mm_struct * mm)
 +{
-+	shared_info_t *s = HYPERVISOR_shared_info;
++	cpumask_t cpu_mask;
 +
-+	do {
-+		shadow_tv_version = s->wc_version;
-+		rmb();
-+		shadow_tv.tv_sec  = s->wc_sec;
-+		shadow_tv.tv_nsec = s->wc_nsec;
-+		rmb();
-+	} while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
 +
-+	if (!independent_wallclock)
-+		__update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
++	if (current->active_mm == mm) {
++		if (current->mm)
++			local_flush_tlb();
++		else
++			leave_mm(smp_processor_id());
++	}
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++
++	preempt_enable();
 +}
 +
-+/*
-+ * Reads a consistent set of time-base values from Xen, into a shadow data
-+ * area.
-+ */
-+static void get_time_values_from_xen(void)
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 +{
-+	shared_info_t           *s = HYPERVISOR_shared_info;
-+	struct vcpu_time_info   *src;
-+	struct shadow_time_info *dst;
++	struct mm_struct *mm = vma->vm_mm;
++	cpumask_t cpu_mask;
 +
-+	src = &s->vcpu_info[smp_processor_id()].time;
-+	dst = &per_cpu(shadow_time, smp_processor_id());
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
 +
-+	do {
-+		dst->version = src->version;
-+		rmb();
-+		dst->tsc_timestamp     = src->tsc_timestamp;
-+		dst->system_timestamp  = src->system_time;
-+		dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
-+		dst->tsc_shift         = src->tsc_shift;
-+		rmb();
-+	} while ((src->version & 1) | (dst->version ^ src->version));
++	if (current->active_mm == mm) {
++		if (current->mm)
++			__flush_tlb_one(va);
++		else
++			leave_mm(smp_processor_id());
++	}
++
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, va);
 +
-+	dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
++	preempt_enable();
 +}
++EXPORT_SYMBOL(flush_tlb_page);
 +
-+static inline int time_values_up_to_date(int cpu)
++static void do_flush_tlb_all(void* info)
 +{
-+	struct vcpu_time_info   *src;
-+	struct shadow_time_info *dst;
-+
-+	src = &HYPERVISOR_shared_info->vcpu_info[cpu].time;
-+	dst = &per_cpu(shadow_time, cpu);
++	unsigned long cpu = smp_processor_id();
 +
-+	rmb();
-+	return (dst->version == src->version);
++	__flush_tlb_all();
++	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
++		leave_mm(cpu);
 +}
 +
-+/*
-+ * This is a special lock that is owned by the CPU and holds the index
-+ * register we are working with.  It is required for NMI access to the
-+ * CMOS/RTC registers.  See include/asm-i386/mc146818rtc.h for details.
-+ */
-+volatile unsigned long cmos_lock = 0;
-+EXPORT_SYMBOL(cmos_lock);
-+
-+/* Routines for accessing the CMOS RAM/RTC. */
-+unsigned char rtc_cmos_read(unsigned char addr)
++void flush_tlb_all(void)
 +{
-+	unsigned char val;
-+	lock_cmos_prefix(addr);
-+	outb_p(addr, RTC_PORT(0));
-+	val = inb_p(RTC_PORT(1));
-+	lock_cmos_suffix(addr);
-+	return val;
++	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
 +}
-+EXPORT_SYMBOL(rtc_cmos_read);
 +
-+void rtc_cmos_write(unsigned char val, unsigned char addr)
++#else
++
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
++				     struct pt_regs *regs)
++{ return 0; }
++void flush_tlb_current_task(void)
++{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
++void flush_tlb_mm(struct mm_struct * mm)
++{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
++void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
++{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
++EXPORT_SYMBOL(flush_tlb_page);
++void flush_tlb_all(void)
++{ xen_tlb_flush_all(); }
++
++#endif /* XEN */
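++
++/*
++ * Design note (a sketch of the trade-off, inferred from the code): the
++ * Xen variants above replace the IPI-driven shootdown with direct
++ * hypercalls (xen_tlb_flush_mask, xen_invlpg_mask, xen_tlb_flush_all),
++ * so the hypervisor flushes remote vcpus without the tlbstate_lock,
++ * the cpumask polling and the interrupt traffic of the native path in
++ * the '#if 0' block above.
++ */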
++
++/*
++ * this function sends a 'reschedule' IPI to another CPU.
++ * it goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule ...
++ */
++void smp_send_reschedule(int cpu)
 +{
-+	lock_cmos_prefix(addr);
-+	outb_p(addr, RTC_PORT(0));
-+	outb_p(val, RTC_PORT(1));
-+	lock_cmos_suffix(addr);
++	WARN_ON(cpu_is_offline(cpu));
++	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
 +}
-+EXPORT_SYMBOL(rtc_cmos_write);
 +
 +/*
-+ * This version of gettimeofday has microsecond resolution
-+ * and better than microsecond precision on fast x86 machines with TSC.
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
 + */
-+void do_gettimeofday(struct timeval *tv)
-+{
-+	unsigned long seq;
-+	unsigned long usec, sec;
-+	unsigned long max_ntp_tick;
-+	s64 nsec;
-+	unsigned int cpu;
-+	struct shadow_time_info *shadow;
-+	u32 local_time_version;
++static DEFINE_SPINLOCK(call_lock);
 +
-+	cpu = get_cpu();
-+	shadow = &per_cpu(shadow_time, cpu);
++struct call_data_struct {
++	void (*func) (void *info);
++	void *info;
++	atomic_t started;
++	atomic_t finished;
++	int wait;
++};
 +
-+	do {
-+		unsigned long lost;
++void lock_ipi_call_lock(void)
++{
++	spin_lock_irq(&call_lock);
++}
 +
-+		local_time_version = shadow->version;
-+		seq = read_seqbegin(&xtime_lock);
++void unlock_ipi_call_lock(void)
++{
++	spin_unlock_irq(&call_lock);
++}
 +
-+		usec = get_usec_offset(shadow);
-+		lost = jiffies - wall_jiffies;
++static struct call_data_struct *call_data;
 +
-+		/*
-+		 * If time_adjust is negative then NTP is slowing the clock
-+		 * so make sure not to go into next possible interval.
-+		 * Better to lose some accuracy than have time go backwards..
-+		 */
-+		if (unlikely(time_adjust < 0)) {
-+			max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
-+			usec = min(usec, max_ntp_tick);
++/**
++ * smp_call_function(): Run a function on all other CPUs.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: currently unused.
++ * @wait: If true, wait (atomically) until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code. Does not return until
++ * remote CPUs are nearly ready to execute <<func>> or have already executed it.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ */
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++			int wait)
++{
++	struct call_data_struct data;
++	int cpus;
 +
-+			if (lost)
-+				usec += lost * max_ntp_tick;
-+		}
-+		else if (unlikely(lost))
-+			usec += lost * (USEC_PER_SEC / HZ);
++	/* Holding any lock stops cpus from going down. */
++	spin_lock(&call_lock);
++	cpus = num_online_cpus() - 1;
++	if (!cpus) {
++		spin_unlock(&call_lock);
++		return 0;
++	}
 +
-+		sec = xtime.tv_sec;
-+		usec += (xtime.tv_nsec / NSEC_PER_USEC);
++	/* Can deadlock when called with interrupts disabled */
++	WARN_ON(irqs_disabled());
 +
-+		nsec = shadow->system_timestamp - processed_system_time;
-+		__normalize_time(&sec, &nsec);
-+		usec += (long)nsec / NSEC_PER_USEC;
++	data.func = func;
++	data.info = info;
++	atomic_set(&data.started, 0);
++	data.wait = wait;
++	if (wait)
++		atomic_set(&data.finished, 0);
 +
-+		if (unlikely(!time_values_up_to_date(cpu))) {
-+			/*
-+			 * We may have blocked for a long time,
-+			 * rendering our calculations invalid
-+			 * (e.g. the time delta may have
-+			 * overflowed). Detect that and recalculate
-+			 * with fresh values.
-+			 */
-+			get_time_values_from_xen();
-+			continue;
-+		}
-+	} while (read_seqretry(&xtime_lock, seq) ||
-+		 (local_time_version != shadow->version));
++	call_data = &data;
++	mb();
++	
++	/* Send a message to all other CPUs and wait for them to respond */
++	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 +
-+	put_cpu();
++	/* Wait for response */
++	while (atomic_read(&data.started) != cpus)
++		barrier();
 +
-+	while (usec >= USEC_PER_SEC) {
-+		usec -= USEC_PER_SEC;
-+		sec++;
-+	}
++	if (wait)
++		while (atomic_read(&data.finished) != cpus)
++			barrier();
++	spin_unlock(&call_lock);
 +
-+	tv->tv_sec = sec;
-+	tv->tv_usec = usec;
++	return 0;
 +}
++EXPORT_SYMBOL(smp_call_function);
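++
++/*
++ * Hypothetical usage sketch (names invented for illustration): run a
++ * fast, non-blocking helper on every other online CPU and wait for it
++ * to complete.  Per the rules above, never call this with interrupts
++ * disabled or from interrupt context.
++ *
++ *	static atomic_t hits = ATOMIC_INIT(0);
++ *
++ *	static void bump(void *info)
++ *	{
++ *		atomic_inc((atomic_t *)info);
++ *	}
++ *
++ *	...
++ *	smp_call_function(bump, &hits, 0, 1);
++ */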
 +
-+EXPORT_SYMBOL(do_gettimeofday);
-+
-+int do_settimeofday(struct timespec *tv)
++static void stop_this_cpu (void * dummy)
 +{
-+	time_t sec;
-+	s64 nsec;
-+	unsigned int cpu;
-+	struct shadow_time_info *shadow;
-+	dom0_op_t op;
-+
-+	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-+		return -EINVAL;
-+
-+	cpu = get_cpu();
-+	shadow = &per_cpu(shadow_time, cpu);
-+
-+	write_seqlock_irq(&xtime_lock);
-+
 +	/*
-+	 * Ensure we don't get blocked for a long time so that our time delta
-+	 * overflows. If that were to happen then our shadow time values would
-+	 * be stale, so we can retry with fresh ones.
++	 * Remove this CPU:
 +	 */
-+	for (;;) {
-+		nsec = tv->tv_nsec - get_nsec_offset(shadow);
-+		if (time_values_up_to_date(cpu))
-+			break;
-+		get_time_values_from_xen();
-+	}
-+	sec = tv->tv_sec;
-+	__normalize_time(&sec, &nsec);
-+
-+	if (is_initial_xendomain() && !independent_wallclock) {
-+		op.cmd = DOM0_SETTIME;
-+		op.u.settime.secs        = sec;
-+		op.u.settime.nsecs       = nsec;
-+		op.u.settime.system_time = shadow->system_timestamp;
-+		HYPERVISOR_dom0_op(&op);
-+		update_wallclock();
-+	} else if (independent_wallclock) {
-+		nsec -= shadow->system_timestamp;
-+		__normalize_time(&sec, &nsec);
-+		__update_wallclock(sec, nsec);
-+	}
-+
-+	write_sequnlock_irq(&xtime_lock);
-+
-+	put_cpu();
-+
-+	clock_was_set();
-+	return 0;
++	cpu_clear(smp_processor_id(), cpu_online_map);
++	local_irq_disable();
++#if 0
++	disable_local_APIC();
++#endif
++	if (cpu_data[smp_processor_id()].hlt_works_ok)
++		for (;;) halt();
++	for (;;);
 +}
 +
-+EXPORT_SYMBOL(do_settimeofday);
++/*
++ * this function calls the 'stop' function on all other CPUs in the system.
++ */
 +
-+static void sync_xen_wallclock(unsigned long dummy);
-+static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
-+static void sync_xen_wallclock(unsigned long dummy)
++void smp_send_stop(void)
 +{
-+	time_t sec;
-+	s64 nsec;
-+	dom0_op_t op;
-+
-+	if (!ntp_synced() || independent_wallclock || !is_initial_xendomain())
-+		return;
++	smp_call_function(stop_this_cpu, NULL, 1, 0);
 +
-+	write_seqlock_irq(&xtime_lock);
++	local_irq_disable();
++#if 0
++	disable_local_APIC();
++#endif
++	local_irq_enable();
++}
 +
-+	sec  = xtime.tv_sec;
-+	nsec = xtime.tv_nsec + ((jiffies - wall_jiffies) * (u64)NS_PER_TICK);
-+	__normalize_time(&sec, &nsec);
++/*
++ * Reschedule callback. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
++				     struct pt_regs *regs)
++{
++	return IRQ_HANDLED;
++}
 +
-+	op.cmd = DOM0_SETTIME;
-+	op.u.settime.secs        = sec;
-+	op.u.settime.nsecs       = nsec;
-+	op.u.settime.system_time = processed_system_time;
-+	HYPERVISOR_dom0_op(&op);
++#include <linux/kallsyms.h>
++irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
++					struct pt_regs *regs)
++{
++	void (*func) (void *info) = call_data->func;
++	void *info = call_data->info;
++	int wait = call_data->wait;
 +
-+	update_wallclock();
++	/*
++	 * Notify initiating CPU that I've grabbed the data and am
++	 * about to execute the function
++	 */
++	mb();
++	atomic_inc(&call_data->started);
++	/*
++	 * At this point the info structure may be out of scope unless wait==1
++	 */
++	irq_enter();
++	(*func)(info);
++	irq_exit();
 +
-+	write_sequnlock_irq(&xtime_lock);
++	if (wait) {
++		mb();
++		atomic_inc(&call_data->finished);
++	}
 +
-+	/* Once per minute. */
-+	mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
++	return IRQ_HANDLED;
 +}
 +
-+static int set_rtc_mmss(unsigned long nowtime)
-+{
-+	int retval;
-+	unsigned long flags;
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/swiotlb.c b/arch/i386/kernel/swiotlb.c
+--- a/arch/i386/kernel/swiotlb.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/swiotlb.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,672 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * This implementation is a fallback for platforms that do not support
++ * I/O TLBs (aka DMA address translation hardware).
++ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick at intel.com>
++ * Copyright (C) 2000 Goutham Rao <goutham.rao at intel.com>
++ * Copyright (C) 2000, 2003 Hewlett-Packard Co
++ *	David Mosberger-Tang <davidm at hpl.hp.com>
++ * Copyright (C) 2005 Keir Fraser <keir at xensource.com>
++ */
 +
-+	if (independent_wallclock || !is_initial_xendomain())
-+		return 0;
++#include <linux/cache.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/highmem.h>
++#include <asm/io.h>
++#include <asm/pci.h>
++#include <asm/dma.h>
++#include <asm/uaccess.h>
++#include <xen/interface/memory.h>
 +
-+	/* gets recalled with irq locally disabled */
-+	spin_lock_irqsave(&rtc_lock, flags);
-+	if (efi_enabled)
-+		retval = efi_set_rtc_mmss(nowtime);
-+	else
-+		retval = mach_set_rtc_mmss(nowtime);
-+	spin_unlock_irqrestore(&rtc_lock, flags);
++int swiotlb;
++EXPORT_SYMBOL(swiotlb);
 +
-+	return retval;
-+}
++#define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))
 +
-+/* monotonic_clock(): returns # of nanoseconds passed since time_init()
-+ *		Note: This function is required to return accurate
-+ *		time even in the absence of multiple timer ticks.
++#define SG_ENT_PHYS_ADDRESS(sg)	(page_to_bus((sg)->page) + (sg)->offset)
++
++/*
++ * Maximum allowable number of contiguous slabs to map,
++ * must be a power of 2.  What is the appropriate value?
++ * The complexity of {map,unmap}_single is linearly dependent on this value.
 + */
-+unsigned long long monotonic_clock(void)
-+{
-+	int cpu = get_cpu();
-+	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
-+	u64 time;
-+	u32 local_time_version;
++#define IO_TLB_SEGSIZE	128
 +
-+	do {
-+		local_time_version = shadow->version;
-+		barrier();
-+		time = shadow->system_timestamp + get_nsec_offset(shadow);
-+		if (!time_values_up_to_date(cpu))
-+			get_time_values_from_xen();
-+		barrier();
-+	} while (local_time_version != shadow->version);
++/*
++ * log of the size of each IO TLB slab.  The number of slabs is command line
++ * controllable.
++ */
++#define IO_TLB_SHIFT 11
 +
-+	put_cpu();
++/* Width of DMA addresses in the IO TLB. 31 bits is an aacraid limitation. */
++#define IO_TLB_DMA_BITS 31
 +
-+	return time;
-+}
-+EXPORT_SYMBOL(monotonic_clock);
++int swiotlb_force;
++static char *iotlb_virt_start;
++static unsigned long iotlb_nslabs;
 +
-+unsigned long long sched_clock(void)
-+{
-+	return monotonic_clock();
-+}
++/*
++ * Used to do a quick range check in swiotlb_unmap_single and
++ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
++ * API.
++ */
++static unsigned long iotlb_pfn_start, iotlb_pfn_end;
 +
-+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
-+unsigned long profile_pc(struct pt_regs *regs)
++/* Does the given dma address reside within the swiotlb aperture? */
++static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
 +{
-+	unsigned long pc = instruction_pointer(regs);
-+
-+#ifdef __x86_64__
-+	/* Assume the lock function has either no stack frame or only a single word.
-+	   This checks if the address on the stack looks like a kernel text address.
-+	   There is a small window for false hits, but in that case the tick
-+	   is just accounted to the spinlock function.
-+	   Better would be to write these functions in assembler again
-+	   and check exactly. */
-+	if (!user_mode_vm(regs) && in_lock_functions(pc)) {
-+		char *v = *(char **)regs->rsp;
-+		if ((v >= _stext && v <= _etext) ||
-+			(v >= _sinittext && v <= _einittext) ||
-+			(v >= (char *)MODULES_VADDR  && v <= (char *)MODULES_END))
-+			return (unsigned long)v;
-+		return ((unsigned long *)regs->rsp)[1];
-+	}
-+#else
-+	if (!user_mode_vm(regs) && in_lock_functions(pc))
-+		return *(unsigned long *)(regs->ebp + 4);
-+#endif
-+
-+	return pc;
++	unsigned long pfn = mfn_to_local_pfn(dev_addr >> PAGE_SHIFT);
++	return (pfn_valid(pfn)
++		&& (pfn >= iotlb_pfn_start)
++		&& (pfn < iotlb_pfn_end));
 +}
-+EXPORT_SYMBOL(profile_pc);
-+#endif
 +
-+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+	s64 delta, delta_cpu, stolen, blocked;
-+	u64 sched_time;
-+	int i, cpu = smp_processor_id();
-+	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
-+	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++/*
++ * When the IOMMU overflows we return a fallback buffer. This sets the size.
++ */
++static unsigned long io_tlb_overflow = 32*1024;
 +
-+	write_seqlock(&xtime_lock);
++void *io_tlb_overflow_buffer;
 +
-+	do {
-+		get_time_values_from_xen();
++/*
++ * This is a free list describing the number of free entries available from
++ * each index
++ */
++static unsigned int *io_tlb_list;
++static unsigned int io_tlb_index;
 +
-+		/* Obtain a consistent snapshot of elapsed wallclock cycles. */
-+		delta = delta_cpu =
-+			shadow->system_timestamp + get_nsec_offset(shadow);
-+		delta     -= processed_system_time;
-+		delta_cpu -= per_cpu(processed_system_time, cpu);
++/*
++ * We need to save away the original address corresponding to a mapped entry
++ * for the sync operations.
++ */
++static struct phys_addr {
++	struct page *page;
++	unsigned int offset;
++} *io_tlb_orig_addr;
 +
-+		/*
-+		 * Obtain a consistent snapshot of stolen/blocked cycles. We
-+		 * can use state_entry_time to detect if we get preempted here.
-+		 */
-+		do {
-+			sched_time = runstate->state_entry_time;
-+			barrier();
-+			stolen = runstate->time[RUNSTATE_runnable] +
-+				runstate->time[RUNSTATE_offline] -
-+				per_cpu(processed_stolen_time, cpu);
-+			blocked = runstate->time[RUNSTATE_blocked] -
-+				per_cpu(processed_blocked_time, cpu);
-+			barrier();
-+		} while (sched_time != runstate->state_entry_time);
-+	} while (!time_values_up_to_date(cpu));
++/*
++ * Protect the above data structures in the map and unmap calls
++ */
++static DEFINE_SPINLOCK(io_tlb_lock);
 +
-+	if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
-+	     unlikely(delta_cpu < -(s64)permitted_clock_jitter))
-+	    && printk_ratelimit()) {
-+		printk("Timer ISR/%d: Time went backwards: "
-+		       "delta=%lld delta_cpu=%lld shadow=%lld "
-+		       "off=%lld processed=%lld cpu_processed=%lld\n",
-+		       cpu, delta, delta_cpu, shadow->system_timestamp,
-+		       (s64)get_nsec_offset(shadow),
-+		       processed_system_time,
-+		       per_cpu(processed_system_time, cpu));
-+		for (i = 0; i < num_online_cpus(); i++)
-+			printk(" %d: %lld\n", i,
-+			       per_cpu(processed_system_time, i));
++static int __init
++setup_io_tlb_npages(char *str)
++{
++	/* Unlike ia64, the size is the aperture in megabytes, not 'slabs'! */
++	if (isdigit(*str)) {
++		iotlb_nslabs = simple_strtoul(str, &str, 0) <<
++			(20 - IO_TLB_SHIFT);
++		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++		/* Round up to power of two (xen_create_contiguous_region). */
++		while (iotlb_nslabs & (iotlb_nslabs-1))
++			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
 +	}
++	if (*str == ',')
++		++str;
++	/*
++	 * NB. 'force' enables the swiotlb, but doesn't force its use for
++	 * every DMA like it does on native Linux. 'off' forcibly disables
++	 * use of the swiotlb.
++	 */
++	if (!strcmp(str, "force"))
++		swiotlb_force = 1;
++	else if (!strcmp(str, "off"))
++		swiotlb_force = -1;
++	return 1;
++}
++__setup("swiotlb=", setup_io_tlb_npages);
++/* make io_tlb_overflow tunable too? */
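++
++/*
++ * Illustrative boot-parameter examples implied by the parser above:
++ * "swiotlb=64,force" asks for a 64 MB aperture (rounded up to a
++ * power-of-two number of slabs) and enables the swiotlb even where it
++ * would otherwise stay off; "swiotlb=off" forcibly disables it.
++ */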
 +
-+	/* System-wide jiffy work. */
-+	while (delta >= NS_PER_TICK) {
-+		delta -= NS_PER_TICK;
-+		processed_system_time += NS_PER_TICK;
-+		do_timer(regs);
-+	}
++/*
++ * Statically reserve bounce buffer space and initialize bounce buffer data
++ * structures for the software IO TLB used to implement the PCI DMA API.
++ */
++void
++swiotlb_init_with_default_size (size_t default_size)
++{
++	unsigned long i, bytes;
 +
-+	if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
-+		update_wallclock();
-+		clock_was_set();
++	if (!iotlb_nslabs) {
++		iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
++		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++		/* Round up to power of two (xen_create_contiguous_region). */
++		while (iotlb_nslabs & (iotlb_nslabs-1))
++			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
 +	}
 +
-+	write_sequnlock(&xtime_lock);
++	bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
 +
 +	/*
-+	 * Account stolen ticks.
-+	 * HACK: Passing NULL to account_steal_time()
-+	 * ensures that the ticks are accounted as stolen.
++	 * Get IO TLB memory from the low pages
 +	 */
-+	if ((stolen > 0) && (delta_cpu > 0)) {
-+		delta_cpu -= stolen;
-+		if (unlikely(delta_cpu < 0))
-+			stolen += delta_cpu; /* clamp local-time progress */
-+		do_div(stolen, NS_PER_TICK);
-+		per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
-+		per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
-+		account_steal_time(NULL, (cputime_t)stolen);
++	iotlb_virt_start = alloc_bootmem_low_pages(bytes);
++	if (!iotlb_virt_start)
++		panic("Cannot allocate SWIOTLB buffer!\n"
++		      "Use dom0_mem Xen boot parameter to reserve\n"
++		      "some DMA memory (e.g., dom0_mem=-128M).\n");
++
++	for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
++		int rc = xen_create_contiguous_region(
++			(unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
++			get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
++			IO_TLB_DMA_BITS);
++		BUG_ON(rc);
 +	}
 +
 +	/*
-+	 * Account blocked ticks.
-+	 * HACK: Passing idle_task to account_steal_time()
-+	 * ensures that the ticks are accounted as idle/wait.
++	 * Allocate and initialize the free list array.  This array is used
++	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
 +	 */
-+	if ((blocked > 0) && (delta_cpu > 0)) {
-+		delta_cpu -= blocked;
-+		if (unlikely(delta_cpu < 0))
-+			blocked += delta_cpu; /* clamp local-time progress */
-+		do_div(blocked, NS_PER_TICK);
-+		per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
-+		per_cpu(processed_system_time, cpu)  += blocked * NS_PER_TICK;
-+		account_steal_time(idle_task(cpu), (cputime_t)blocked);
-+	}
++	io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
++	for (i = 0; i < iotlb_nslabs; i++)
++		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++	io_tlb_index = 0;
++	io_tlb_orig_addr = alloc_bootmem(
++		iotlb_nslabs * sizeof(*io_tlb_orig_addr));
 +
-+	/* Account user/system ticks. */
-+	if (delta_cpu > 0) {
-+		do_div(delta_cpu, NS_PER_TICK);
-+		per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
-+		if (user_mode(regs))
-+			account_user_time(current, (cputime_t)delta_cpu);
-+		else
-+			account_system_time(current, HARDIRQ_OFFSET,
-+					    (cputime_t)delta_cpu);
-+	}
++	/*
++	 * Get the overflow emergency buffer
++	 */
++	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
 +
-+	/* Local timer processing (see update_process_times()). */
-+	run_local_timers();
-+	if (rcu_pending(cpu))
-+		rcu_check_callbacks(cpu, user_mode(regs));
-+	scheduler_tick();
-+	run_posix_cpu_timers(current);
++	iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
++	iotlb_pfn_end   = iotlb_pfn_start + (bytes >> PAGE_SHIFT);
 +
-+	return IRQ_HANDLED;
++	printk(KERN_INFO "Software IO TLB enabled:\n"
++	       " Aperture:     %lu megabytes\n"
++	       " Kernel range: 0x%016lx - 0x%016lx\n",
++	       bytes >> 20,
++	       (unsigned long)iotlb_virt_start,
++	       (unsigned long)iotlb_virt_start + bytes);
 +}
 +
-+static void init_missing_ticks_accounting(int cpu)
++void
++swiotlb_init(void)
 +{
-+	struct vcpu_register_runstate_memory_area area;
-+	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++	long ram_end;
++	size_t defsz = 64 * (1 << 20); /* 64MB default size */
 +
-+	memset(runstate, 0, sizeof(*runstate));
++	if (swiotlb_force == 1) {
++		swiotlb = 1;
++	} else if ((swiotlb_force != -1) &&
++		   is_running_on_xen() &&
++		   is_initial_xendomain()) {
++		/* Domain 0 always has a swiotlb. */
++		ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
++		if (ram_end <= 0x7ffff)
++			defsz = 2 * (1 << 20); /* 2MB on <2GB systems. */
++		swiotlb = 1;
++	}
 +
-+	area.addr.v = runstate;
-+	HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
++	if (swiotlb)
++		swiotlb_init_with_default_size(defsz);
++	else
++		printk(KERN_INFO "Software IO TLB disabled\n");
++}
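++
++/*
++ * Reading the threshold above (an assumption from the constants alone):
++ * XENMEM_maximum_ram_page returns the highest machine page number, so
++ * ram_end <= 0x7ffff means at most 512Ki pages of 4 KiB each, i.e. no
++ * more than 2 GB of RAM, for which the small 2 MB aperture suffices.
++ */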
 +
-+	per_cpu(processed_blocked_time, cpu) =
-+		runstate->time[RUNSTATE_blocked];
-+	per_cpu(processed_stolen_time, cpu) =
-+		runstate->time[RUNSTATE_runnable] +
-+		runstate->time[RUNSTATE_offline];
++/*
++ * We use __copy_to_user_inatomic to transfer to the host buffer because the
++ * buffer may be mapped read-only (e.g., in blkback driver) but lower-level
++ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
++ * unnecessary copy from the aperture to the host buffer, and a page fault.
++ */
++static void
++__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
++{
++	if (PageHighMem(buffer.page)) {
++		size_t len, bytes;
++		char *dev, *host, *kmp;
++		len = size;
++		while (len != 0) {
++			if (((bytes = len) + buffer.offset) > PAGE_SIZE)
++				bytes = PAGE_SIZE - buffer.offset;
++			kmp  = kmap_atomic(buffer.page, KM_SWIOTLB);
++			dev  = dma_addr + size - len;
++			host = kmp + buffer.offset;
++			if (dir == DMA_FROM_DEVICE) {
++				if (__copy_to_user_inatomic(host, dev, bytes))
++					/* inaccessible */;
++			} else
++				memcpy(dev, host, bytes);
++			kunmap_atomic(kmp, KM_SWIOTLB);
++			len -= bytes;
++			buffer.page++;
++			buffer.offset = 0;
++		}
++	} else {
++		char *host = (char *)phys_to_virt(
++			page_to_pseudophys(buffer.page)) + buffer.offset;
++		if (dir == DMA_FROM_DEVICE) {
++			if (__copy_to_user_inatomic(host, dma_addr, size))
++				/* inaccessible */;
++		} else if (dir == DMA_TO_DEVICE)
++			memcpy(dma_addr, host, size);
++	}
 +}
 +
-+/* not static: needed by APM */
-+unsigned long get_cmos_time(void)
++/*
++ * Allocates bounce buffer and returns its kernel virtual address.
++ */
++static void *
++map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
 +{
-+	unsigned long retval;
 +	unsigned long flags;
++	char *dma_addr;
++	unsigned int nslots, stride, index, wrap;
++	int i;
 +
-+	spin_lock_irqsave(&rtc_lock, flags);
-+
-+	if (efi_enabled)
-+		retval = efi_get_time();
++	/*
++	 * For mappings greater than a page, we limit the stride (and
++	 * hence alignment) to a page size.
++	 */
++	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++	if (size > PAGE_SIZE)
++		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
 +	else
-+		retval = mach_get_cmos_time();
-+
-+	spin_unlock_irqrestore(&rtc_lock, flags);
-+
-+	return retval;
-+}
-+EXPORT_SYMBOL(get_cmos_time);
-+
-+static void sync_cmos_clock(unsigned long dummy);
-+
-+static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
++		stride = 1;
 +
-+static void sync_cmos_clock(unsigned long dummy)
-+{
-+	struct timeval now, next;
-+	int fail = 1;
++	BUG_ON(!nslots);
 +
 +	/*
-+	 * If we have an externally synchronized Linux clock, then update
-+	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
-+	 * called as close as possible to 500 ms before the new second starts.
-+	 * This code is run on a timer.  If the clock is set, that timer
-+	 * may not expire at the correct time.  Thus, we adjust...
++	 * Find a suitable number of IO TLB entries that will fit this
++	 * request and allocate a buffer from that IO TLB pool.
 +	 */
-+	if (!ntp_synced())
-+		/*
-+		 * Not synced, exit, do not restart a timer (if one is
-+		 * running, let it run out).
-+		 */
-+		return;
++	spin_lock_irqsave(&io_tlb_lock, flags);
++	{
++		wrap = index = ALIGN(io_tlb_index, stride);
 +
-+	do_gettimeofday(&now);
-+	if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
-+	    now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
-+		fail = set_rtc_mmss(now.tv_sec);
++		if (index >= iotlb_nslabs)
++			wrap = index = 0;
 +
-+	next.tv_usec = USEC_AFTER - now.tv_usec;
-+	if (next.tv_usec <= 0)
-+		next.tv_usec += USEC_PER_SEC;
++		do {
++			/*
++			 * If we find a slot that indicates we have 'nslots'
++			 * number of contiguous buffers, we allocate the
++			 * buffers from that slot and mark the entries as '0'
++			 * indicating unavailable.
++			 */
++			if (io_tlb_list[index] >= nslots) {
++				int count = 0;
 +
-+	if (!fail)
-+		next.tv_sec = 659;
-+	else
-+		next.tv_sec = 0;
++				for (i = index; i < (int)(index + nslots); i++)
++					io_tlb_list[i] = 0;
++				for (i = index - 1;
++				     (OFFSET(i, IO_TLB_SEGSIZE) !=
++				      IO_TLB_SEGSIZE - 1) && io_tlb_list[i];
++				     i--)
++					io_tlb_list[i] = ++count;
++				dma_addr = iotlb_virt_start +
++					(index << IO_TLB_SHIFT);
 +
-+	if (next.tv_usec >= USEC_PER_SEC) {
-+		next.tv_sec++;
-+		next.tv_usec -= USEC_PER_SEC;
-+	}
-+	mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
-+}
++				/*
++				 * Update the indices to avoid searching in
++				 * the next round.
++				 */
++				io_tlb_index = 
++					((index + nslots) < iotlb_nslabs
++					 ? (index + nslots) : 0);
 +
-+void notify_arch_cmos_timer(void)
-+{
-+	mod_timer(&sync_cmos_timer, jiffies + 1);
-+	mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
-+}
++				goto found;
++			}
++			index += stride;
++			if (index >= iotlb_nslabs)
++				index = 0;
++		} while (index != wrap);
 +
-+static long clock_cmos_diff, sleep_start;
++		spin_unlock_irqrestore(&io_tlb_lock, flags);
++		return NULL;
++	}
++  found:
++	spin_unlock_irqrestore(&io_tlb_lock, flags);
 +
-+static int timer_suspend(struct sys_device *dev, pm_message_t state)
-+{
 +	/*
-+	 * Estimate time zone so that set_time can update the clock
++	 * Save away the mapping from the original address to the DMA address.
++	 * This is needed when we sync the memory.  Then we sync the buffer if
++	 * needed.
 +	 */
-+	clock_cmos_diff = -get_cmos_time();
-+	clock_cmos_diff += get_seconds();
-+	sleep_start = get_cmos_time();
-+	return 0;
++	io_tlb_orig_addr[index] = buffer;
++	if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
++
++	return dma_addr;
 +}
 +
-+static int timer_resume(struct sys_device *dev)
++/*
++ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
++ */
++static void
++unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 +{
 +	unsigned long flags;
-+	unsigned long sec;
-+	unsigned long sleep_length;
-+
-+#ifdef CONFIG_HPET_TIMER
-+	if (is_hpet_enabled())
-+		hpet_reenable();
-+#endif
-+	sec = get_cmos_time() + clock_cmos_diff;
-+	sleep_length = (get_cmos_time() - sleep_start) * HZ;
-+	write_seqlock_irqsave(&xtime_lock, flags);
-+	xtime.tv_sec = sec;
-+	xtime.tv_nsec = 0;
-+	jiffies_64 += sleep_length;
-+	wall_jiffies += sleep_length;
-+	write_sequnlock_irqrestore(&xtime_lock, flags);
-+	touch_softlockup_watchdog();
-+	return 0;
-+}
-+
-+static struct sysdev_class timer_sysclass = {
-+	.resume = timer_resume,
-+	.suspend = timer_suspend,
-+	set_kset_name("timer"),
-+};
++	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++	struct phys_addr buffer = io_tlb_orig_addr[index];
 +
++	/*
++	 * First, sync the memory before unmapping the entry
++	 */
++	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
 +
-+/* XXX this driverfs stuff should probably go elsewhere later -john */
-+static struct sys_device device_timer = {
-+	.id	= 0,
-+	.cls	= &timer_sysclass,
-+};
++	/*
++	 * Return the buffer to the free list by setting the corresponding
++	 * entries to indicate the number of contiguous entries available.
++	 * While returning the entries to the free list, we merge the entries
++	 * with slots below and above the pool being returned.
++	 */
++	spin_lock_irqsave(&io_tlb_lock, flags);
++	{
++		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
++			 io_tlb_list[index + nslots] : 0);
++		/*
++		 * Step 1: return the slots to the free list, merging them
++		 * with any succeeding free slots.
++		 */
++		for (i = index + nslots - 1; i >= index; i--)
++			io_tlb_list[i] = ++count;
++		/*
++		 * Step 2: merge the returned slots with the preceding slots,
++		 * if available (non-zero).
++		 */
++		for (i = index - 1;
++		     (OFFSET(i, IO_TLB_SEGSIZE) !=
++		      IO_TLB_SEGSIZE - 1) && io_tlb_list[i];
++		     i--)
++			io_tlb_list[i] = ++count;
++	}
++	spin_unlock_irqrestore(&io_tlb_lock, flags);
++}
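++
++/*
++ * Continuing the worked example above (values illustrative): freeing
++ * the two slots at index 3 from the state {3 2 1 0 0 3 2 1} finds
++ * count = io_tlb_list[5] == 3 free slots above, so step 1 rewrites
++ * entries 4 and 3 to 4 and 5, and step 2 recounts the run below,
++ * restoring the fully free segment {8 7 6 5 4 3 2 1}.
++ */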
 +
-+static int time_init_device(void)
++static void
++sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 +{
-+	int error = sysdev_class_register(&timer_sysclass);
-+	if (!error)
-+		error = sysdev_register(&device_timer);
-+	return error;
++	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++	struct phys_addr buffer = io_tlb_orig_addr[index];
++	BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
++	__sync_single(buffer, dma_addr, size, dir);
 +}
 +
-+device_initcall(time_init_device);
-+
-+#ifdef CONFIG_HPET_TIMER
-+extern void (*late_time_init)(void);
-+/* Duplicate of time_init() below, with hpet_enable part added */
-+static void __init hpet_time_init(void)
++static void
++swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 +{
-+	xtime.tv_sec = get_cmos_time();
-+	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
-+	set_normalized_timespec(&wall_to_monotonic,
-+		-xtime.tv_sec, -xtime.tv_nsec);
++	/*
++	 * Ran out of IOMMU space for this operation. This is very bad.
++	 * Unfortunately the drivers cannot handle this operation properly
++	 * unless they check for pci_dma_mapping_error (most don't).
++	 * When the mapping is small enough, return a static buffer to limit
++	 * the damage, or panic when the transfer is too big.
++	 */
++	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
++	       "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
 +
-+	if ((hpet_enable() >= 0) && hpet_use_timer) {
-+		printk("Using HPET for base-timer\n");
++	if (size > io_tlb_overflow && do_panic) {
++		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++			panic("PCI-DMA: Memory would be corrupted\n");
++		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++			panic("PCI-DMA: Random memory would be DMAed\n");
 +	}
-+	time_init_hook();
 +}
-+#endif
-+
-+/* Dynamically-mapped IRQ. */
-+DEFINE_PER_CPU(int, timer_irq);
 +
-+extern void (*late_time_init)(void);
-+static void setup_cpu0_timer_irq(void)
++/*
++ * Map a single buffer of the indicated size for DMA in streaming mode.  The
++ * PCI address to use is returned.
++ *
++ * Once the device is given the dma address, the device owns this memory until
++ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
++ */
++dma_addr_t
++swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 +{
-+	per_cpu(timer_irq, 0) =
-+		bind_virq_to_irqhandler(
-+			VIRQ_TIMER,
-+			0,
-+			timer_interrupt,
-+			SA_INTERRUPT,
-+			"timer0",
-+			NULL);
-+	BUG_ON(per_cpu(timer_irq, 0) < 0);
-+}
++	dma_addr_t dev_addr = virt_to_bus(ptr);
++	void *map;
++	struct phys_addr buffer;
 +
-+void __init time_init(void)
-+{
-+#ifdef CONFIG_HPET_TIMER
-+	if (is_hpet_capable()) {
-+		/*
-+		 * HPET initialization needs to do memory-mapped io. So, let
-+		 * us do a late initialization after mem_init().
-+		 */
-+		late_time_init = hpet_time_init;
-+		return;
-+	}
-+#endif
-+	get_time_values_from_xen();
++	BUG_ON(dir == DMA_NONE);
 +
-+	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
-+	per_cpu(processed_system_time, 0) = processed_system_time;
-+	init_missing_ticks_accounting(0);
++	/*
++	 * If the pointer passed in happens to be in the device's DMA window,
++	 * we can safely return the device addr and not worry about bounce
++	 * buffering it.
++	 */
++	if (!range_straddles_page_boundary(ptr, size) &&
++	    !address_needs_mapping(hwdev, dev_addr))
++		return dev_addr;
 +
-+	update_wallclock();
++	/*
++	 * Oh well, have to allocate and map a bounce buffer.
++	 */
++	buffer.page   = virt_to_page(ptr);
++	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
++	map = map_single(hwdev, buffer, size, dir);
++	if (!map) {
++		swiotlb_full(hwdev, size, dir, 1);
++		map = io_tlb_overflow_buffer;
++	}
 +
-+	init_cpu_khz();
-+	printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
-+	       cpu_khz / 1000, cpu_khz % 1000);
++	dev_addr = virt_to_bus(map);
++	return dev_addr;
++}
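++
++/*
++ * Driver-side usage sketch (illustrative; 'mydev', 'buf' and 'len'
++ * are placeholders).  The device owns the memory between map and
++ * unmap, and a failed mapping is detected via the overflow buffer:
++ *
++ *	dma_addr_t bus = swiotlb_map_single(mydev, buf, len, DMA_TO_DEVICE);
++ *	if (swiotlb_dma_mapping_error(bus))
++ *		return -ENOMEM;
++ *	... start the transfer and wait for it to complete ...
++ *	swiotlb_unmap_single(mydev, bus, len, DMA_TO_DEVICE);
++ */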
 +
-+#if defined(__x86_64__)
-+	vxtime.mode = VXTIME_TSC;
-+	vxtime.quot = (1000000L << US_SCALE) / vxtime_hz;
-+	vxtime.tsc_quot = (1000L << US_SCALE) / cpu_khz;
-+	sync_core();
-+	rdtscll(vxtime.last_tsc);
-+#endif
++/*
++ * Unmap a single streaming mode DMA translation.  The dma_addr and size must
++ * match what was provided for in a previous swiotlb_map_single call.  All
++ * other usages are undefined.
++ *
++ * After this call, reads by the cpu to the buffer are guaranteed to see
++ * whatever the device wrote there.
++ */
++void
++swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
++		     int dir)
++{
++	BUG_ON(dir == DMA_NONE);
++	if (in_swiotlb_aperture(dev_addr))
++		unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
 +
-+	/* Cannot request_irq() until kmem is initialised. */
-+	late_time_init = setup_cpu0_timer_irq;
++/*
++ * Make physical memory consistent for a single streaming mode DMA translation
++ * after a transfer.
++ *
++ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
++ * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
++ * call this function before doing so.  At the next point you give the PCI dma
++ * address back to the card, you must first perform a
++ * swiotlb_sync_single_for_device(), and then the device again owns the buffer.
++ */
++void
++swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++			    size_t size, int dir)
++{
++	BUG_ON(dir == DMA_NONE);
++	if (in_swiotlb_aperture(dev_addr))
++		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
 +}
 +
-+/* Convert jiffies to system time. */
-+u64 jiffies_to_st(unsigned long j)
++void
++swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
++			       size_t size, int dir)
 +{
-+	unsigned long seq;
-+	long delta;
-+	u64 st;
-+
-+	do {
-+		seq = read_seqbegin(&xtime_lock);
-+		delta = j - jiffies;
-+		if (delta < 1) {
-+			/* Triggers in some wrap-around cases, but that's okay:
-+			 * we just end up with a shorter timeout. */
-+			st = processed_system_time + NS_PER_TICK;
-+		} else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) {
-+			/* Very long timeout means there is no pending timer.
-+			 * We indicate this to Xen by passing zero timeout. */
-+			st = 0;
-+		} else {
-+			st = processed_system_time + delta * (u64)NS_PER_TICK;
-+		}
-+	} while (read_seqretry(&xtime_lock, seq));
-+
-+	return st;
++	BUG_ON(dir == DMA_NONE);
++	if (in_swiotlb_aperture(dev_addr))
++		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
 +}
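++
++/*
++ * Usage sketch for the sync pair (illustrative; names as in the
++ * sketch after swiotlb_map_single above).  To inspect a mapped buffer
++ * from the CPU without tearing down the mapping:
++ *
++ *	swiotlb_sync_single_for_cpu(mydev, bus, len, DMA_FROM_DEVICE);
++ *	... read the buffer contents on the CPU ...
++ *	swiotlb_sync_single_for_device(mydev, bus, len, DMA_FROM_DEVICE);
++ *	... then hand the address back to the card ...
++ */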
-+EXPORT_SYMBOL(jiffies_to_st);
 +
 +/*
-+ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
-+ * These functions are based on implementations from arch/s390/kernel/time.c
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * This is the scatter-gather version of the above swiotlb_map_single
++ * interface.  Here the scatter gather list elements are each tagged with the
++ * appropriate dma address and length.  They are obtained via
++ * sg_dma_{address,length}(SG).
++ *
++ * NOTE: An implementation may be able to use a smaller number of
++ *       DMA address/length pairs than there are SG table elements
++ *       (for example via virtual mapping capabilities).
++ *       The routine returns the number of addr/length pairs actually
++ *       used, at most nents.
++ *
++ * Device ownership issues as mentioned above for swiotlb_map_single are the
++ * same here.
 + */
-+static void stop_hz_timer(void)
++int
++swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++	       int dir)
 +{
-+	unsigned int cpu = smp_processor_id();
-+	unsigned long j;
-+
-+	cpu_set(cpu, nohz_cpu_mask);
-+
-+	/* See matching smp_mb in rcu_start_batch in rcupdate.c.  These mbs  */
-+	/* ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a  */
-+	/* value of rcp->cur that matches rdp->quiescbatch and allows us to  */
-+	/* stop the hz timer then the cpumasks created for subsequent values */
-+	/* of cur in rcu_start_batch are guaranteed to pick up the updated   */
-+	/* nohz_cpu_mask and so will not depend on this cpu.                 */
++	struct phys_addr buffer;
++	dma_addr_t dev_addr;
++	char *map;
++	int i;
 +
-+	smp_mb();
++	BUG_ON(dir == DMA_NONE);
 +
-+	/* Leave ourselves in tick mode if rcu or softirq or timer pending. */
-+	if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
-+	    (j = next_timer_interrupt(), time_before_eq(j, jiffies))) {
-+		cpu_clear(cpu, nohz_cpu_mask);
-+		j = jiffies + 1;
++	for (i = 0; i < nelems; i++, sg++) {
++		dev_addr = SG_ENT_PHYS_ADDRESS(sg);
++		if (address_needs_mapping(hwdev, dev_addr)) {
++			buffer.page   = sg->page;
++			buffer.offset = sg->offset;
++			map = map_single(hwdev, buffer, sg->length, dir);
++			if (!map) {
++				/* Don't panic here, we expect map_sg users
++				   to do proper error handling. */
++				swiotlb_full(hwdev, sg->length, dir, 0);
++				swiotlb_unmap_sg(hwdev, sg - i, i, dir);
++				sg[0].dma_length = 0;
++				return 0;
++			}
++			sg->dma_address = (dma_addr_t)virt_to_bus(map);
++		} else
++			sg->dma_address = dev_addr;
++		sg->dma_length = sg->length;
 +	}
-+
-+	if (HYPERVISOR_set_timer_op(jiffies_to_st(j)) != 0)
-+		BUG();
++	return nelems;
 +}
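++
++/*
++ * Callers must handle a zero return themselves; the error path above
++ * has already unmapped everything mapped so far.  A minimal sketch,
++ * with 'mydev', 'sgl' and 'nents' as placeholders:
++ *
++ *	int n = swiotlb_map_sg(mydev, sgl, nents, DMA_TO_DEVICE);
++ *	if (n == 0)
++ *		return -ENOMEM;
++ */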
 +
-+static void start_hz_timer(void)
++/*
++ * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
++ * concerning calls here are the same as for swiotlb_unmap_single() above.
++ */
++void
++swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++		 int dir)
 +{
-+	cpu_clear(smp_processor_id(), nohz_cpu_mask);
-+}
++	int i;
 +
-+void raw_safe_halt(void)
-+{
-+	stop_hz_timer();
-+	/* Blocking includes an implicit local_irq_enable(). */
-+	HYPERVISOR_block();
-+	start_hz_timer();
++	BUG_ON(dir == DMA_NONE);
++
++	for (i = 0; i < nelems; i++, sg++)
++		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++			unmap_single(hwdev,
++				     (void *)bus_to_virt(sg->dma_address),
++				     sg->dma_length, dir);
 +}
-+EXPORT_SYMBOL(raw_safe_halt);
 +
-+void halt(void)
++/*
++ * Make physical memory consistent for a set of streaming mode DMA translations
++ * after a transfer.
++ *
++ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
++ * and usage.
++ */
++void
++swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++			int nelems, int dir)
 +{
-+	if (irqs_disabled())
-+		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
++	int i;
++
++	BUG_ON(dir == DMA_NONE);
++
++	for (i = 0; i < nelems; i++, sg++)
++		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++			sync_single(hwdev,
++				    (void *)bus_to_virt(sg->dma_address),
++				    sg->dma_length, dir);
 +}
-+EXPORT_SYMBOL(halt);
 +
-+/* No locking required. We are only CPU running, and interrupts are off. */
-+void time_resume(void)
++void
++swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++			   int nelems, int dir)
 +{
-+	init_cpu_khz();
-+
-+	get_time_values_from_xen();
++	int i;
 +
-+	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
-+	per_cpu(processed_system_time, 0) = processed_system_time;
-+	init_missing_ticks_accounting(0);
++	BUG_ON(dir == DMA_NONE);
 +
-+	update_wallclock();
++	for (i = 0; i < nelems; i++, sg++)
++		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++			sync_single(hwdev,
++				    (void *)bus_to_virt(sg->dma_address),
++				    sg->dma_length, dir);
 +}
 +
-+#ifdef CONFIG_SMP
-+static char timer_name[NR_CPUS][15];
-+
-+void local_setup_timer(unsigned int cpu)
++dma_addr_t
++swiotlb_map_page(struct device *hwdev, struct page *page,
++		 unsigned long offset, size_t size,
++		 enum dma_data_direction direction)
 +{
-+	int seq;
++	struct phys_addr buffer;
++	dma_addr_t dev_addr;
++	char *map;
 +
-+	BUG_ON(cpu == 0);
++	dev_addr = page_to_bus(page) + offset;
++	if (address_needs_mapping(hwdev, dev_addr)) {
++		buffer.page   = page;
++		buffer.offset = offset;
++		map = map_single(hwdev, buffer, size, direction);
++		if (!map) {
++			swiotlb_full(hwdev, size, direction, 1);
++			map = io_tlb_overflow_buffer;
++		}
++		dev_addr = (dma_addr_t)virt_to_bus(map);
++	}
 +
-+	do {
-+		seq = read_seqbegin(&xtime_lock);
-+		/* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
-+		per_cpu(processed_system_time, cpu) =
-+			per_cpu(shadow_time, 0).system_timestamp;
-+		init_missing_ticks_accounting(cpu);
-+	} while (read_seqretry(&xtime_lock, seq));
++	return dev_addr;
++}
 +
-+	sprintf(timer_name[cpu], "timer%d", cpu);
-+	per_cpu(timer_irq, cpu) =
-+		bind_virq_to_irqhandler(
-+			VIRQ_TIMER,
-+			cpu,
-+			timer_interrupt,
-+			SA_INTERRUPT,
-+			timer_name[cpu],
-+			NULL);
-+	BUG_ON(per_cpu(timer_irq, cpu) < 0);
++void
++swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++		   size_t size, enum dma_data_direction direction)
++{
++	BUG_ON(!valid_dma_direction(direction));
++	if (in_swiotlb_aperture(dma_address))
++		unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
 +}
 +
-+void local_teardown_timer(unsigned int cpu)
++int
++swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 +{
-+	BUG_ON(cpu == 0);
-+	unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
++	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
 +}
-+#endif
 +
 +/*
-+ * /proc/sys/xen: This really belongs in another file. It can stay here for
-+ * now however.
++ * Return whether the given PCI device DMA address mask can be supported
++ * properly.  For example, if your device can only drive the low 24 bits
++ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
++ * this function.
 + */
-+static ctl_table xen_subtable[] = {
-+	{
-+		.ctl_name	= 1,
-+		.procname	= "independent_wallclock",
-+		.data		= &independent_wallclock,
-+		.maxlen		= sizeof(independent_wallclock),
-+		.mode		= 0644,
-+		.proc_handler	= proc_dointvec
-+	},
-+	{
-+		.ctl_name	= 2,
-+		.procname	= "permitted_clock_jitter",
-+		.data		= &permitted_clock_jitter,
-+		.maxlen		= sizeof(permitted_clock_jitter),
-+		.mode		= 0644,
-+		.proc_handler	= proc_doulongvec_minmax
-+	},
-+	{ 0 }
-+};
-+static ctl_table xen_table[] = {
-+	{
-+		.ctl_name	= 123,
-+		.procname	= "xen",
-+		.mode		= 0555,
-+		.child		= xen_subtable},
-+	{ 0 }
-+};
-+static int __init xen_sysctl_init(void)
++int
++swiotlb_dma_supported(struct device *hwdev, u64 mask)
 +{
-+	(void)register_sysctl_table(xen_table, 0);
-+	return 0;
++	return (mask >= ((1UL << IO_TLB_DMA_BITS) - 1));
 +}
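++
++/*
++ * Worked example (illustrative; IO_TLB_DMA_BITS is defined earlier in
++ * this file): a device that can only drive the low 24 bits passes
++ * mask == 0x00ffffff, which is rejected whenever IO_TLB_DMA_BITS > 24
++ * because (1UL << IO_TLB_DMA_BITS) - 1 then exceeds the mask.  A full
++ * 32-bit mask of 0xffffffff passes for any smaller IO_TLB_DMA_BITS.
++ */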
-+__initcall(xen_sysctl_init);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/traps.c linux-2.6.18-xen/arch/i386/kernel/traps.c
---- linux-2.6.18.3/arch/i386/kernel/traps.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/kernel/traps.c	2006-11-19 14:26:22.000000000 +0100
-@@ -642,18 +642,11 @@
++
++EXPORT_SYMBOL(swiotlb_init);
++EXPORT_SYMBOL(swiotlb_map_single);
++EXPORT_SYMBOL(swiotlb_unmap_single);
++EXPORT_SYMBOL(swiotlb_map_sg);
++EXPORT_SYMBOL(swiotlb_unmap_sg);
++EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_single_for_device);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
++EXPORT_SYMBOL(swiotlb_map_page);
++EXPORT_SYMBOL(swiotlb_unmap_page);
++EXPORT_SYMBOL(swiotlb_dma_mapping_error);
++EXPORT_SYMBOL(swiotlb_dma_supported);
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
+--- a/arch/i386/kernel/sysenter.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/sysenter.c	2007-03-14 10:55:14.000000000 +0100
+@@ -23,6 +23,10 @@
+ #include <asm/pgtable.h>
+ #include <asm/unistd.h>
+ 
++#ifdef CONFIG_XEN
++#include <xen/interface/callback.h>
++#endif
++
+ /*
+  * Should the kernel map a VDSO page into processes and pass its
+  * address down to glibc upon exec()?
+@@ -48,6 +52,7 @@
  
- static void io_check_error(unsigned char reason, struct pt_regs * regs)
+ void enable_sep_cpu(void)
  {
--	unsigned long i;
--
- 	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
- 	show_registers(regs);
++#ifndef CONFIG_X86_NO_TSS
+ 	int cpu = get_cpu();
+ 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
  
- 	/* Re-enable the IOCK line, wait for a few seconds */
--	reason = (reason & 0xf) | 8;
--	outb(reason, 0x61);
--	i = 2000;
--	while (--i) udelay(1000);
--	reason &= ~8;
--	outb(reason, 0x61);
-+	clear_io_check_error(reason);
+@@ -62,6 +67,7 @@
+ 	wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
+ 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
+ 	put_cpu();	
++#endif
  }
  
- static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/traps-xen.c linux-2.6.18-xen/arch/i386/kernel/traps-xen.c
---- linux-2.6.18.3/arch/i386/kernel/traps-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/traps-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,1185 @@
+ /*
+@@ -76,6 +82,18 @@
+ {
+ 	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
+ 
++#ifdef CONFIG_XEN
++	if (boot_cpu_has(X86_FEATURE_SEP)) {
++		struct callback_register sysenter = {
++			.type = CALLBACKTYPE_sysenter,
++			.address = { __KERNEL_CS, (unsigned long)sysenter_entry },
++		};
++
++		if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0)
++			clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
++	}
++#endif
++
+ #ifdef CONFIG_COMPAT_VDSO
+ 	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY_EXEC);
+ 	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/time-xen.c b/arch/i386/kernel/time-xen.c
+--- a/arch/i386/kernel/time-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/time-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,1065 @@
 +/*
-+ *  linux/arch/i386/traps.c
++ *  linux/arch/i386/kernel/time.c
 + *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 + *
-+ *  Pentium III FXSR, SSE support
-+ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ * This file contains the PC-specific time handling details:
++ * reading the RTC at bootup, etc..
++ * 1994-07-02    Alan Modra
++ *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
++ * 1995-03-26    Markus Kuhn
++ *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
++ *      precision CMOS clock update
++ * 1996-05-03    Ingo Molnar
++ *      fixed time warps in do_[slow|fast]_gettimeoffset()
++ * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
++ *		"A Kernel Model for Precision Timekeeping" by Dave Mills
++ * 1998-09-05    (Various)
++ *	More robust do_fast_gettimeoffset() algorithm implemented
++ *	(works with APM, Cyrix 6x86MX and Centaur C6),
++ *	monotonic gettimeofday() with fast_get_timeoffset(),
++ *	drift-proof precision TSC calibration on boot
++ *	(C. Scott Ananian <cananian at alumni.princeton.edu>, Andrew D.
++ *	Balsa <andrebalsa at altern.org>, Philip Gladstone <philip at raptor.com>;
++ *	ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause at tu-harburg.de>).
++ * 1998-12-16    Andrea Arcangeli
++ *	Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
++ *	because it was not accounting lost_ticks.
++ * 1998-12-24 Copyright (C) 1998  Andrea Arcangeli
++ *	Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
++ *	serialize accesses to xtime/lost_ticks).
 + */
 +
-+/*
-+ * 'Traps.c' handles hardware traps and faults after we have saved some
-+ * state in 'asm.s'.
-+ */
++#include <linux/errno.h>
 +#include <linux/sched.h>
 +#include <linux/kernel.h>
++#include <linux/param.h>
 +#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/timer.h>
 +#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
 +#include <linux/interrupt.h>
-+#include <linux/highmem.h>
-+#include <linux/kallsyms.h>
-+#include <linux/ptrace.h>
-+#include <linux/utsname.h>
-+#include <linux/kprobes.h>
-+#include <linux/kexec.h>
-+#include <linux/unwind.h>
-+
-+#ifdef CONFIG_EISA
-+#include <linux/ioport.h>
-+#include <linux/eisa.h>
-+#endif
-+
-+#ifdef CONFIG_MCA
++#include <linux/time.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++#include <linux/bcd.h>
++#include <linux/efi.h>
 +#include <linux/mca.h>
-+#endif
++#include <linux/sysctl.h>
++#include <linux/percpu.h>
++#include <linux/kernel_stat.h>
++#include <linux/posix-timers.h>
 +
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
 +#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/debugreg.h>
-+#include <asm/desc.h>
-+#include <asm/i387.h>
-+#include <asm/nmi.h>
-+#include <asm/unwind.h>
 +#include <asm/smp.h>
++#include <asm/irq.h>
++#include <asm/msr.h>
++#include <asm/delay.h>
++#include <asm/mpspec.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/timer.h>
++#include <asm/time.h>
++
++#include "mach_time.h"
++
++#include <linux/timex.h>
++
++#include <asm/hpet.h>
++
 +#include <asm/arch_hooks.h>
-+#include <asm/kdebug.h>
 +
-+#include <linux/module.h>
++#include <xen/evtchn.h>
++#include <xen/interface/vcpu.h>
 +
-+#include "mach_traps.h"
++#include <asm/i8259.h>
 +
-+asmlinkage int system_call(void);
++int pit_latch_buggy;              /* extern */
 +
-+struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
-+		{ 0, 0 }, { 0, 0 } };
++#define USEC_PER_TICK (USEC_PER_SEC / HZ)
++#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
++#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
 +
-+/* Do we ignore FPU interrupts ? */
-+char ignore_fpu_irq = 0;
++#define NS_SCALE	10 /* 2^10, carefully chosen */
++#define US_SCALE	32 /* 2^32, arbitrarily chosen */
 +
-+#ifndef CONFIG_X86_NO_IDT
-+/*
-+ * The IDT has to be page-aligned to simplify the Pentium
-+ * F0 0F bug workaround.. We have a special link segment
-+ * for this.
-+ */
-+struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
-+#endif
++unsigned int cpu_khz;	/* Detected as we calibrate the TSC */
++EXPORT_SYMBOL(cpu_khz);
 +
-+asmlinkage void divide_error(void);
-+asmlinkage void debug(void);
-+asmlinkage void nmi(void);
-+asmlinkage void int3(void);
-+asmlinkage void overflow(void);
-+asmlinkage void bounds(void);
-+asmlinkage void invalid_op(void);
-+asmlinkage void device_not_available(void);
-+asmlinkage void coprocessor_segment_overrun(void);
-+asmlinkage void invalid_TSS(void);
-+asmlinkage void segment_not_present(void);
-+asmlinkage void stack_segment(void);
-+asmlinkage void general_protection(void);
-+asmlinkage void page_fault(void);
-+asmlinkage void coprocessor_error(void);
-+asmlinkage void simd_coprocessor_error(void);
-+asmlinkage void alignment_check(void);
-+#ifndef CONFIG_XEN
-+asmlinkage void spurious_interrupt_bug(void);
-+#else
-+asmlinkage void fixup_4gb_segment(void);
-+#endif
-+asmlinkage void machine_check(void);
++DEFINE_SPINLOCK(rtc_lock);
++EXPORT_SYMBOL(rtc_lock);
 +
-+static int kstack_depth_to_print = 24;
-+#ifdef CONFIG_STACK_UNWIND
-+static int call_trace = 1;
-+#else
-+#define call_trace (-1)
-+#endif
-+ATOMIC_NOTIFIER_HEAD(i386die_chain);
++extern struct init_timer_opts timer_tsc_init;
++extern struct timer_opts timer_tsc;
++#define timer_none timer_tsc
 +
-+int register_die_notifier(struct notifier_block *nb)
++/* These are periodically updated in shared_info, and then copied here. */
++struct shadow_time_info {
++	u64 tsc_timestamp;     /* TSC at last update of time vals.  */
++	u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
++	u32 tsc_to_nsec_mul;
++	u32 tsc_to_usec_mul;
++	int tsc_shift;
++	u32 version;
++};
++static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
++static struct timespec shadow_tv;
++static u32 shadow_tv_version;
++
++/* Keep track of last time we did processing/updating of jiffies and xtime. */
++static u64 processed_system_time;   /* System time (ns) at last processing. */
++static DEFINE_PER_CPU(u64, processed_system_time);
++
++/* How much CPU time was spent blocked and how much was 'stolen'? */
++static DEFINE_PER_CPU(u64, processed_stolen_time);
++static DEFINE_PER_CPU(u64, processed_blocked_time);
++
++/* Current runstate of each CPU (updated automatically by the hypervisor). */
++static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
++
++/* Must be signed, as it's compared with s64 quantities which can be -ve. */
++#define NS_PER_TICK (1000000000LL/HZ)
++
++static inline void __normalize_time(time_t *sec, s64 *nsec)
++{
++	while (*nsec >= NSEC_PER_SEC) {
++		(*nsec) -= NSEC_PER_SEC;
++		(*sec)++;
++	}
++	while (*nsec < 0) {
++		(*nsec) += NSEC_PER_SEC;
++		(*sec)--;
++	}
++}
++
++/* Does this guest OS track Xen time, or set its wall clock independently? */
++static int independent_wallclock = 0;
++static int __init __independent_wallclock(char *str)
 +{
-+	vmalloc_sync_all();
-+	return atomic_notifier_chain_register(&i386die_chain, nb);
++	independent_wallclock = 1;
++	return 1;
 +}
-+EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
++__setup("independent_wallclock", __independent_wallclock);
 +
-+int unregister_die_notifier(struct notifier_block *nb)
++/* Permitted clock jitter, in nsecs, beyond which a warning will be printed. */
++static unsigned long permitted_clock_jitter = 10000000UL; /* 10ms */
++static int __init __permitted_clock_jitter(char *str)
 +{
-+	return atomic_notifier_chain_unregister(&i386die_chain, nb);
++	permitted_clock_jitter = simple_strtoul(str, NULL, 0);
++	return 1;
 +}
-+EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
++__setup("permitted_clock_jitter=", __permitted_clock_jitter);
 +
-+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
++#ifndef CONFIG_X86
++int tsc_disable __devinitdata = 0;
++#endif
++
++static void delay_tsc(unsigned long loops)
 +{
-+	return	p > (void *)tinfo &&
-+		p < (void *)tinfo + THREAD_SIZE - 3;
++	unsigned long bclock, now;
++
++	rdtscl(bclock);
++	do {
++		rep_nop();
++		rdtscl(now);
++	} while ((now - bclock) < loops);
 +}
 +
++struct timer_opts timer_tsc = {
++	.name = "tsc",
++	.delay = delay_tsc,
++};
++
 +/*
-+ * Print one address/symbol entries per line.
++ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
++ * yielding a 64-bit result.
 + */
-+static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
++static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
 +{
-+	printk(" [<%08lx>] ", addr);
++	u64 product;
++	u32 tmp1, tmp2;
 +
-+	print_symbol("%s\n", addr);
++	if (shift < 0)
++		delta >>= -shift;
++	else
++		delta <<= shift;
++
++	__asm__ (
++		"mul  %5       ; "
++		"mov  %4,%%eax ; "
++		"mov  %%edx,%4 ; "
++		"mul  %5       ; "
++		"xor  %5,%5    ; "
++		"add  %4,%%eax ; "
++		"adc  %5,%%edx ; "
++		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
++		: "a" ((u32)delta), "1" ((u32)(delta >> US_SCALE)), "2" (mul_frac) );
++
++	return product;
 +}
 +
-+static inline unsigned long print_context_stack(struct thread_info *tinfo,
-+				unsigned long *stack, unsigned long ebp,
-+				char *log_lvl)
++int read_current_timer(unsigned long *timer_val)
 +{
-+	unsigned long addr;
++	rdtscl(*timer_val);
++	return 0;
++}
 +
-+#ifdef	CONFIG_FRAME_POINTER
-+	while (valid_stack_ptr(tinfo, (void *)ebp)) {
-+		addr = *(unsigned long *)(ebp + 4);
-+		print_addr_and_symbol(addr, log_lvl);
-+		/*
-+		 * break out of recursive entries (such as
-+		 * end_of_stack_stop_unwind_function):
-+	 	 */
-+		if (ebp == *(unsigned long *)ebp)
-+			break;
-+		ebp = *(unsigned long *)ebp;
-+	}
-+#else
-+	while (valid_stack_ptr(tinfo, stack)) {
-+		addr = *stack++;
-+		if (__kernel_text_address(addr))
-+			print_addr_and_symbol(addr, log_lvl);
-+	}
-+#endif
-+	return ebp;
++void init_cpu_khz(void)
++{
++	u64 __cpu_khz = 1000000ULL << US_SCALE;
++	struct vcpu_time_info *info;
++	info = &HYPERVISOR_shared_info->vcpu_info[0].time;
++	do_div(__cpu_khz, info->tsc_to_system_mul);
++	if (info->tsc_shift < 0)
++		cpu_khz = __cpu_khz << -info->tsc_shift;
++	else
++		cpu_khz = __cpu_khz >> info->tsc_shift;
 +}
 +
-+static asmlinkage int
-+show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
++static u64 get_nsec_offset(struct shadow_time_info *shadow)
 +{
-+	int n = 0;
++	u64 now, delta;
++	rdtscll(now);
++	delta = now - shadow->tsc_timestamp;
++	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
++}
 +
-+	while (unwind(info) == 0 && UNW_PC(info)) {
-+		n++;
-+		print_addr_and_symbol(UNW_PC(info), log_lvl);
-+		if (arch_unw_user_mode(info))
-+			break;
-+	}
-+	return n;
++static unsigned long get_usec_offset(struct shadow_time_info *shadow)
++{
++	u64 now, delta;
++	rdtscll(now);
++	delta = now - shadow->tsc_timestamp;
++	return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
 +}
 +
-+static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-+			       unsigned long *stack, char *log_lvl)
++static void __update_wallclock(time_t sec, long nsec)
 +{
-+	unsigned long ebp;
++	long wtm_nsec, xtime_nsec;
++	time_t wtm_sec, xtime_sec;
++	u64 tmp, wc_nsec;
 +
-+	if (!task)
-+		task = current;
++	/* Adjust wall-clock time base based on jiffies ticks. */
++	wc_nsec = processed_system_time;
++	wc_nsec += sec * (u64)NSEC_PER_SEC;
++	wc_nsec += nsec;
 +
-+	if (call_trace >= 0) {
-+		int unw_ret = 0;
-+		struct unwind_frame_info info;
-+
-+		if (regs) {
-+			if (unwind_init_frame_info(&info, task, regs) == 0)
-+				unw_ret = show_trace_unwind(&info, log_lvl);
-+		} else if (task == current)
-+			unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
-+		else {
-+			if (unwind_init_blocked(&info, task) == 0)
-+				unw_ret = show_trace_unwind(&info, log_lvl);
-+		}
-+		if (unw_ret > 0) {
-+			if (call_trace == 1 && !arch_unw_user_mode(&info)) {
-+				print_symbol("DWARF2 unwinder stuck at %s\n",
-+					     UNW_PC(&info));
-+				if (UNW_SP(&info) >= PAGE_OFFSET) {
-+					printk("Leftover inexact backtrace:\n");
-+					stack = (void *)UNW_SP(&info);
-+				} else
-+					printk("Full inexact backtrace again:\n");
-+			} else if (call_trace >= 1)
-+				return;
-+			else
-+				printk("Full inexact backtrace again:\n");
-+		} else
-+			printk("Inexact backtrace:\n");
-+	}
++	/* Split wallclock base into seconds and nanoseconds. */
++	tmp = wc_nsec;
++	xtime_nsec = do_div(tmp, 1000000000);
++	xtime_sec  = (time_t)tmp;
 +
-+	if (task == current) {
-+		/* Grab ebp right from our regs */
-+		asm ("movl %%ebp, %0" : "=r" (ebp) : );
-+	} else {
-+		/* ebp is the last reg pushed by switch_to */
-+		ebp = *(unsigned long *) task->thread.esp;
-+	}
++	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
++	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
 +
-+	while (1) {
-+		struct thread_info *context;
-+		context = (struct thread_info *)
-+			((unsigned long)stack & (~(THREAD_SIZE - 1)));
-+		ebp = print_context_stack(context, stack, ebp, log_lvl);
-+		stack = (unsigned long*)context->previous_esp;
-+		if (!stack)
-+			break;
-+		printk("%s =======================\n", log_lvl);
-+	}
-+}
++	set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
++	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 +
-+void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
-+{
-+	show_trace_log_lvl(task, regs, stack, "");
++	ntp_clear();
 +}
 +
-+static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-+			       unsigned long *esp, char *log_lvl)
++static void update_wallclock(void)
 +{
-+	unsigned long *stack;
-+	int i;
-+
-+	if (esp == NULL) {
-+		if (task)
-+			esp = (unsigned long*)task->thread.esp;
-+		else
-+			esp = (unsigned long *)&esp;
-+	}
++	shared_info_t *s = HYPERVISOR_shared_info;
 +
-+	stack = esp;
-+	for(i = 0; i < kstack_depth_to_print; i++) {
-+		if (kstack_end(stack))
-+			break;
-+		if (i && ((i % 8) == 0))
-+			printk("\n%s       ", log_lvl);
-+		printk("%08lx ", *stack++);
-+	}
-+	printk("\n%sCall Trace:\n", log_lvl);
-+	show_trace_log_lvl(task, regs, esp, log_lvl);
-+}
++	do {
++		shadow_tv_version = s->wc_version;
++		rmb();
++		shadow_tv.tv_sec  = s->wc_sec;
++		shadow_tv.tv_nsec = s->wc_nsec;
++		rmb();
++	} while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
 +
-+void show_stack(struct task_struct *task, unsigned long *esp)
-+{
-+	printk("       ");
-+	show_stack_log_lvl(task, NULL, esp, "");
++	if (!independent_wallclock)
++		__update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
 +}
 +
 +/*
-+ * The architecture-independent dump_stack generator
++ * Reads a consistent set of time-base values from Xen, into a shadow data
++ * area.
 + */
-+void dump_stack(void)
++static void get_time_values_from_xen(void)
 +{
-+	unsigned long stack;
-+
-+	show_trace(current, NULL, &stack);
-+}
++	shared_info_t           *s = HYPERVISOR_shared_info;
++	struct vcpu_time_info   *src;
++	struct shadow_time_info *dst;
 +
-+EXPORT_SYMBOL(dump_stack);
++	src = &s->vcpu_info[smp_processor_id()].time;
++	dst = &per_cpu(shadow_time, smp_processor_id());
 +
-+void show_registers(struct pt_regs *regs)
-+{
-+	int i;
-+	int in_kernel = 1;
-+	unsigned long esp;
-+	unsigned short ss;
++	do {
++		dst->version = src->version;
++		rmb();
++		dst->tsc_timestamp     = src->tsc_timestamp;
++		dst->system_timestamp  = src->system_time;
++		dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
++		dst->tsc_shift         = src->tsc_shift;
++		rmb();
++	} while ((src->version & 1) | (dst->version ^ src->version));
 +
-+	esp = (unsigned long) (&regs->esp);
-+	savesegment(ss, ss);
-+	if (user_mode_vm(regs)) {
-+		in_kernel = 0;
-+		esp = regs->esp;
-+		ss = regs->xss & 0xffff;
-+	}
-+	print_modules();
-+	printk(KERN_EMERG "CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\n"
-+			"EFLAGS: %08lx   (%s %.*s) \n",
-+		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
-+		print_tainted(), regs->eflags, system_utsname.release,
-+		(int)strcspn(system_utsname.version, " "),
-+		system_utsname.version);
-+	print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
-+	printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
-+		regs->eax, regs->ebx, regs->ecx, regs->edx);
-+	printk(KERN_EMERG "esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
-+		regs->esi, regs->edi, regs->ebp, esp);
-+	printk(KERN_EMERG "ds: %04x   es: %04x   ss: %04x\n",
-+		regs->xds & 0xffff, regs->xes & 0xffff, ss);
-+	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
-+		TASK_COMM_LEN, current->comm, current->pid,
-+		current_thread_info(), current, current->thread_info);
-+	/*
-+	 * When in-kernel, we also print out the stack and code at the
-+	 * time of the fault..
-+	 */
-+	if (in_kernel) {
-+		u8 __user *eip;
++	dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
++}
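++
++/*
++ * Note on the retry condition above: Xen bumps src->version once
++ * before and once after each update, so an odd value means an update
++ * is in flight and a version change across the copy means we raced
++ * with one.  '(src->version & 1) | (dst->version ^ src->version)' is
++ * nonzero in either case, forcing another pass.
++ */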
 +
-+		printk("\n" KERN_EMERG "Stack: ");
-+		show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
++static inline int time_values_up_to_date(int cpu)
++{
++	struct vcpu_time_info   *src;
++	struct shadow_time_info *dst;
 +
-+		printk(KERN_EMERG "Code: ");
++	src = &HYPERVISOR_shared_info->vcpu_info[cpu].time;
++	dst = &per_cpu(shadow_time, cpu);
 +
-+		eip = (u8 __user *)regs->eip - 43;
-+		for (i = 0; i < 64; i++, eip++) {
-+			unsigned char c;
++	rmb();
++	return (dst->version == src->version);
++}
 +
-+			if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
-+				printk(" Bad EIP value.");
-+				break;
-+			}
-+			if (eip == (u8 __user *)regs->eip)
-+				printk("<%02x> ", c);
-+			else
-+				printk("%02x ", c);
-+		}
-+	}
-+	printk("\n");
-+}	
++/*
++ * This is a special lock that is owned by the CPU and holds the index
++ * register we are working with.  It is required for NMI access to the
++ * CMOS/RTC registers.  See include/asm-i386/mc146818rtc.h for details.
++ */
++volatile unsigned long cmos_lock = 0;
++EXPORT_SYMBOL(cmos_lock);
 +
-+static void handle_BUG(struct pt_regs *regs)
++/* Routines for accessing the CMOS RAM/RTC. */
++unsigned char rtc_cmos_read(unsigned char addr)
 +{
-+	unsigned long eip = regs->eip;
-+	unsigned short ud2;
-+
-+	if (eip < PAGE_OFFSET)
-+		return;
-+	if (__get_user(ud2, (unsigned short __user *)eip))
-+		return;
-+	if (ud2 != 0x0b0f)
-+		return;
-+
-+	printk(KERN_EMERG "------------[ cut here ]------------\n");
-+#ifdef CONFIG_DEBUG_BUGVERBOSE
-+	do {
-+		unsigned short line;
-+		char *file;
-+		char c;
-+
-+		if (__get_user(line, (unsigned short __user *)(eip + 2)))
-+			break;
-+		if (__get_user(file, (char * __user *)(eip + 4)) ||
-+		    (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
-+			file = "<bad filename>";
++	unsigned char val;
++	lock_cmos_prefix(addr);
++	outb_p(addr, RTC_PORT(0));
++	val = inb_p(RTC_PORT(1));
++	lock_cmos_suffix(addr);
++	return val;
++}
++EXPORT_SYMBOL(rtc_cmos_read);
 +
-+		printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
-+		return;
-+	} while (0);
-+#endif
-+	printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
++void rtc_cmos_write(unsigned char val, unsigned char addr)
++{
++	lock_cmos_prefix(addr);
++	outb_p(addr, RTC_PORT(0));
++	outb_p(val, RTC_PORT(1));
++	lock_cmos_suffix(addr);
 +}
++EXPORT_SYMBOL(rtc_cmos_write);
 +
-+/* This is gone through when something in the kernel
-+ * has done something bad and is about to be terminated.
-+*/
-+void die(const char * str, struct pt_regs * regs, long err)
++/*
++ * This version of gettimeofday has microsecond resolution
++ * and better than microsecond precision on fast x86 machines with TSC.
++ */
++void do_gettimeofday(struct timeval *tv)
 +{
-+	static struct {
-+		spinlock_t lock;
-+		u32 lock_owner;
-+		int lock_owner_depth;
-+	} die = {
-+		.lock =			SPIN_LOCK_UNLOCKED,
-+		.lock_owner =		-1,
-+		.lock_owner_depth =	0
-+	};
-+	static int die_counter;
-+	unsigned long flags;
++	unsigned long seq;
++	unsigned long usec, sec;
++	unsigned long max_ntp_tick;
++	s64 nsec;
++	unsigned int cpu;
++	struct shadow_time_info *shadow;
++	u32 local_time_version;
 +
-+	oops_enter();
++	cpu = get_cpu();
++	shadow = &per_cpu(shadow_time, cpu);
 +
-+	if (die.lock_owner != raw_smp_processor_id()) {
-+		console_verbose();
-+		spin_lock_irqsave(&die.lock, flags);
-+		die.lock_owner = smp_processor_id();
-+		die.lock_owner_depth = 0;
-+		bust_spinlocks(1);
-+	}
-+	else
-+		local_save_flags(flags);
++	do {
++		local_time_version = shadow->version;
++		seq = read_seqbegin(&xtime_lock);
 +
-+	if (++die.lock_owner_depth < 3) {
-+		int nl = 0;
-+		unsigned long esp;
-+		unsigned short ss;
++		usec = get_usec_offset(shadow);
 +
-+		handle_BUG(regs);
-+		printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-+#ifdef CONFIG_PREEMPT
-+		printk(KERN_EMERG "PREEMPT ");
-+		nl = 1;
-+#endif
-+#ifdef CONFIG_SMP
-+		if (!nl)
-+			printk(KERN_EMERG);
-+		printk("SMP ");
-+		nl = 1;
-+#endif
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+		if (!nl)
-+			printk(KERN_EMERG);
-+		printk("DEBUG_PAGEALLOC");
-+		nl = 1;
-+#endif
-+		if (nl)
-+			printk("\n");
-+		if (notify_die(DIE_OOPS, str, regs, err,
-+					current->thread.trap_no, SIGSEGV) !=
-+				NOTIFY_STOP) {
-+			show_registers(regs);
-+			/* Executive summary in case the oops scrolled away */
-+			esp = (unsigned long) (&regs->esp);
-+			savesegment(ss, ss);
-+			if (user_mode(regs)) {
-+				esp = regs->esp;
-+				ss = regs->xss & 0xffff;
-+			}
-+			printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
-+			print_symbol("%s", regs->eip);
-+			printk(" SS:ESP %04x:%08lx\n", ss, esp);
++		/*
++		 * If time_adjust is negative then NTP is slowing the clock
++		 * so make sure not to go into the next possible interval.
++		 * Better to lose some accuracy than have time go backwards.
++		 */
++		if (unlikely(time_adjust < 0)) {
++			max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
++			usec = min(usec, max_ntp_tick);
 +		}
-+		else
-+			regs = NULL;
-+  	} else
-+		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
 +
-+	bust_spinlocks(0);
-+	die.lock_owner = -1;
-+	spin_unlock_irqrestore(&die.lock, flags);
++		sec = xtime.tv_sec;
++		usec += (xtime.tv_nsec / NSEC_PER_USEC);
 +
-+	if (!regs)
-+		return;
++		nsec = shadow->system_timestamp - processed_system_time;
++		__normalize_time(&sec, &nsec);
++		usec += (long)nsec / NSEC_PER_USEC;
 +
-+	if (kexec_should_crash(current))
-+		crash_kexec(regs);
++		if (unlikely(!time_values_up_to_date(cpu))) {
++			/*
++			 * We may have blocked for a long time,
++			 * rendering our calculations invalid
++			 * (e.g. the time delta may have
++			 * overflowed). Detect that and recalculate
++			 * with fresh values.
++			 */
++			get_time_values_from_xen();
++			continue;
++		}
++	} while (read_seqretry(&xtime_lock, seq) ||
++		 (local_time_version != shadow->version));
 +
-+	if (in_interrupt())
-+		panic("Fatal exception in interrupt");
++	put_cpu();
 +
-+	if (panic_on_oops)
-+		panic("Fatal exception");
++	while (usec >= USEC_PER_SEC) {
++		usec -= USEC_PER_SEC;
++		sec++;
++	}
 +
-+	oops_exit();
-+	do_exit(SIGSEGV);
++	tv->tv_sec = sec;
++	tv->tv_usec = usec;
 +}
 +
-+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-+{
-+	if (!user_mode_vm(regs))
-+		die(str, regs, err);
-+}
++EXPORT_SYMBOL(do_gettimeofday);
 +
-+static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
-+			      struct pt_regs * regs, long error_code,
-+			      siginfo_t *info)
++int do_settimeofday(struct timespec *tv)
 +{
-+	struct task_struct *tsk = current;
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = trapnr;
-+
-+	if (regs->eflags & VM_MASK) {
-+		if (vm86)
-+			goto vm86_trap;
-+		goto trap_signal;
-+	}
++	time_t sec;
++	s64 nsec;
++	unsigned int cpu;
++	struct shadow_time_info *shadow;
++	dom0_op_t op;
 +
-+	if (!user_mode(regs))
-+		goto kernel_trap;
++	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++		return -EINVAL;
 +
-+	trap_signal: {
-+		if (info)
-+			force_sig_info(signr, info, tsk);
-+		else
-+			force_sig(signr, tsk);
-+		return;
-+	}
++	cpu = get_cpu();
++	shadow = &per_cpu(shadow_time, cpu);
 +
-+	kernel_trap: {
-+		if (!fixup_exception(regs))
-+			die(str, regs, error_code);
-+		return;
-+	}
++	write_seqlock_irq(&xtime_lock);
 +
-+	vm86_trap: {
-+		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
-+		if (ret) goto trap_signal;
-+		return;
++	/*
++	 * Ensure we don't get blocked for a long time so that our time delta
++	 * overflows. If that were to happen then our shadow time values would
++	 * be stale, so we can retry with fresh ones.
++	 */
++	for (;;) {
++		nsec = tv->tv_nsec - get_nsec_offset(shadow);
++		if (time_values_up_to_date(cpu))
++			break;
++		get_time_values_from_xen();
 +	}
-+}
-+
-+#define DO_ERROR(trapnr, signr, str, name) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+						== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
-+}
++	sec = tv->tv_sec;
++	__normalize_time(&sec, &nsec);
 +
-+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	siginfo_t info; \
-+	info.si_signo = signr; \
-+	info.si_errno = 0; \
-+	info.si_code = sicode; \
-+	info.si_addr = (void __user *)siaddr; \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+						== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
-+}
++	if (is_initial_xendomain() && !independent_wallclock) {
++		op.cmd = DOM0_SETTIME;
++		op.u.settime.secs        = sec;
++		op.u.settime.nsecs       = nsec;
++		op.u.settime.system_time = shadow->system_timestamp;
++		HYPERVISOR_dom0_op(&op);
++		update_wallclock();
++	} else if (independent_wallclock) {
++		nsec -= shadow->system_timestamp;
++		__normalize_time(&sec, &nsec);
++		__update_wallclock(sec, nsec);
++	}
 +
-+#define DO_VM86_ERROR(trapnr, signr, str, name) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+						== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
-+}
++	write_sequnlock_irq(&xtime_lock);
 +
-+#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	siginfo_t info; \
-+	info.si_signo = signr; \
-+	info.si_errno = 0; \
-+	info.si_code = sicode; \
-+	info.si_addr = (void __user *)siaddr; \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+						== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
++	put_cpu();
++
++	clock_was_set();
++	return 0;
 +}
 +
-+DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
-+#ifndef CONFIG_KPROBES
-+DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
-+#endif
-+DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
-+DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
-+DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
-+DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
-+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-+DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
-+DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
-+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-+DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
++EXPORT_SYMBOL(do_settimeofday);
 +
-+fastcall void __kprobes do_general_protection(struct pt_regs * regs,
-+					      long error_code)
++static void sync_xen_wallclock(unsigned long dummy);
++static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
++static void sync_xen_wallclock(unsigned long dummy)
 +{
-+	current->thread.error_code = error_code;
-+	current->thread.trap_no = 13;
++	time_t sec;
++	s64 nsec;
++	dom0_op_t op;
 +
-+	if (regs->eflags & VM_MASK)
-+		goto gp_in_vm86;
++	if (!ntp_synced() || independent_wallclock || !is_initial_xendomain())
++		return;
 +
-+	if (!user_mode(regs))
-+		goto gp_in_kernel;
++	write_seqlock_irq(&xtime_lock);
 +
-+	current->thread.error_code = error_code;
-+	current->thread.trap_no = 13;
-+	force_sig(SIGSEGV, current);
-+	return;
++	sec  = xtime.tv_sec;
++	nsec = xtime.tv_nsec;
++	__normalize_time(&sec, &nsec);
 +
-+gp_in_vm86:
-+	local_irq_enable();
-+	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
-+	return;
++	op.cmd = DOM0_SETTIME;
++	op.u.settime.secs        = sec;
++	op.u.settime.nsecs       = nsec;
++	op.u.settime.system_time = processed_system_time;
++	HYPERVISOR_dom0_op(&op);
 +
-+gp_in_kernel:
-+	if (!fixup_exception(regs)) {
-+		if (notify_die(DIE_GPF, "general protection fault", regs,
-+				error_code, 13, SIGSEGV) == NOTIFY_STOP)
-+			return;
-+		die("general protection fault", regs, error_code);
-+	}
++	update_wallclock();
++
++	write_sequnlock_irq(&xtime_lock);
++
++	/* Once per minute. */
++	mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
 +}
 +
-+static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
++static int set_rtc_mmss(unsigned long nowtime)
 +{
-+	printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
-+			"to continue\n");
-+	printk(KERN_EMERG "You probably have a hardware problem with your RAM "
-+			"chips\n");
++	int retval;
++	unsigned long flags;
 +
-+	/* Clear and disable the memory parity error line. */
-+	clear_mem_error(reason);
++	if (independent_wallclock || !is_initial_xendomain())
++		return 0;
++
++	/* gets recalled with irq locally disabled */
++	spin_lock_irqsave(&rtc_lock, flags);
++	retval = set_wallclock(nowtime);
++	spin_unlock_irqrestore(&rtc_lock, flags);
++
++	return retval;
 +}
 +
-+static void io_check_error(unsigned char reason, struct pt_regs * regs)
++/* monotonic_clock(): returns # of nanoseconds passed since time_init()
++ *		Note: This function is required to return accurate
++ *		time even in the absence of multiple timer ticks.
++ */
++unsigned long long monotonic_clock(void)
 +{
-+	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
-+	show_registers(regs);
++	int cpu = get_cpu();
++	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++	u64 time;
++	u32 local_time_version;
 +
-+	/* Re-enable the IOCK line, wait for a few seconds */
-+	clear_io_check_error(reason);
++	do {
++		local_time_version = shadow->version;
++		barrier();
++		time = shadow->system_timestamp + get_nsec_offset(shadow);
++		if (!time_values_up_to_date(cpu))
++			get_time_values_from_xen();
++		barrier();
++	} while (local_time_version != shadow->version);
++
++	put_cpu();
++
++	return time;
++}
++EXPORT_SYMBOL(monotonic_clock);
++
++unsigned long long sched_clock(void)
++{
++	return monotonic_clock();
 +}
 +
-+static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++unsigned long profile_pc(struct pt_regs *regs)
 +{
-+#ifdef CONFIG_MCA
-+	/* Might actually be able to figure out what the guilty party
-+	* is. */
-+	if( MCA_bus ) {
-+		mca_handle_nmi();
-+		return;
++	unsigned long pc = instruction_pointer(regs);
++
++#ifdef CONFIG_SMP
++	if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++#ifdef CONFIG_FRAME_POINTER
++		return *(unsigned long *)(regs->ebp + 4);
++#else
++		unsigned long *sp;
++		if ((regs->xcs & 3) == 0)
++			sp = (unsigned long *)&regs->esp;
++		else
++			sp = (unsigned long *)regs->esp;
++		/* Return address is either directly at stack pointer
++		   or above a saved eflags. Eflags has bits 22-31 zero,
++		   kernel addresses don't. */
++		if (sp[0] >> 22)
++			return sp[0];
++		if (sp[1] >> 22)
++			return sp[1];
++#endif
 +	}
 +#endif
-+	printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-+		reason, smp_processor_id());
-+	printk("Dazed and confused, but trying to continue\n");
-+	printk("Do you have a strange power saving mode enabled?\n");
++	return pc;
 +}
++EXPORT_SYMBOL(profile_pc);
 +
-+static DEFINE_SPINLOCK(nmi_print_lock);
-+
-+void die_nmi (struct pt_regs *regs, const char *msg)
++irqreturn_t timer_interrupt(int irq, void *dev_id)
 +{
-+	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
-+	    NOTIFY_STOP)
-+		return;
-+
-+	spin_lock(&nmi_print_lock);
-+	/*
-+	* We are in trouble anyway, lets at least try
-+	* to get a message out.
-+	*/
-+	bust_spinlocks(1);
-+	printk(KERN_EMERG "%s", msg);
-+	printk(" on CPU%d, eip %08lx, registers:\n",
-+		smp_processor_id(), regs->eip);
-+	show_registers(regs);
-+	printk(KERN_EMERG "console shuts up ...\n");
-+	console_silent();
-+	spin_unlock(&nmi_print_lock);
-+	bust_spinlocks(0);
++	s64 delta, delta_cpu, stolen, blocked;
++	u64 sched_time;
++	int i, cpu = smp_processor_id();
++	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
 +
-+	/* If we are in kernel we are probably nested up pretty bad
-+	 * and might aswell get out now while we still can.
-+	*/
-+	if (!user_mode_vm(regs)) {
-+		current->thread.trap_no = 2;
-+		crash_kexec(regs);
-+	}
++	write_seqlock(&xtime_lock);
 +
-+	do_exit(SIGSEGV);
-+}
++	do {
++		get_time_values_from_xen();
 +
-+static void default_do_nmi(struct pt_regs * regs)
-+{
-+	unsigned char reason = 0;
++		/* Obtain a consistent snapshot of elapsed wallclock cycles. */
++		delta = delta_cpu =
++			shadow->system_timestamp + get_nsec_offset(shadow);
++		delta     -= processed_system_time;
++		delta_cpu -= per_cpu(processed_system_time, cpu);
 +
-+	/* Only the BSP gets external NMIs from the system.  */
-+	if (!smp_processor_id())
-+		reason = get_nmi_reason();
-+ 
-+	if (!(reason & 0xc0)) {
-+		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
-+							== NOTIFY_STOP)
-+			return;
-+#ifdef CONFIG_X86_LOCAL_APIC
 +		/*
-+		 * Ok, so this is none of the documented NMI sources,
-+		 * so it must be the NMI watchdog.
++		 * Obtain a consistent snapshot of stolen/blocked cycles. We
++		 * can use state_entry_time to detect if we get preempted here.
 +		 */
-+		if (nmi_watchdog) {
-+			nmi_watchdog_tick(regs);
-+			return;
-+		}
-+#endif
-+		unknown_nmi_error(reason, regs);
-+		return;
++		do {
++			sched_time = runstate->state_entry_time;
++			barrier();
++			stolen = runstate->time[RUNSTATE_runnable] +
++				runstate->time[RUNSTATE_offline] -
++				per_cpu(processed_stolen_time, cpu);
++			blocked = runstate->time[RUNSTATE_blocked] -
++				per_cpu(processed_blocked_time, cpu);
++			barrier();
++		} while (sched_time != runstate->state_entry_time);
++	} while (!time_values_up_to_date(cpu));
++
++	if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
++	     unlikely(delta_cpu < -(s64)permitted_clock_jitter))
++	    && printk_ratelimit()) {
++		printk("Timer ISR/%d: Time went backwards: "
++		       "delta=%lld delta_cpu=%lld shadow=%lld "
++		       "off=%lld processed=%lld cpu_processed=%lld\n",
++		       cpu, delta, delta_cpu, shadow->system_timestamp,
++		       (s64)get_nsec_offset(shadow),
++		       processed_system_time,
++		       per_cpu(processed_system_time, cpu));
++		for (i = 0; i < num_online_cpus(); i++)
++			printk(" %d: %lld\n", i,
++			       per_cpu(processed_system_time, i));
 +	}
-+	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
-+		return;
-+	if (reason & 0x80)
-+		mem_parity_error(reason, regs);
-+	if (reason & 0x40)
-+		io_check_error(reason, regs);
-+	/*
-+	 * Reassert NMI in case it became active meanwhile
-+	 * as it's edge-triggered.
-+	 */
-+	reassert_nmi();
-+}
 +
-+static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
-+{
-+	return 0;
-+}
-+ 
-+static nmi_callback_t nmi_callback = dummy_nmi_callback;
-+ 
-+fastcall void do_nmi(struct pt_regs * regs, long error_code)
-+{
-+	int cpu;
++	/* System-wide jiffy work. */
++	while (delta >= NS_PER_TICK) {
++		delta -= NS_PER_TICK;
++		processed_system_time += NS_PER_TICK;
++		do_timer(1);
++	}
 +
-+	nmi_enter();
++	if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
++		update_wallclock();
++		clock_was_set();
++	}
 +
-+	cpu = smp_processor_id();
++	write_sequnlock(&xtime_lock);
 +
-+	++nmi_count(cpu);
++	/*
++	 * Account stolen ticks.
++	 * HACK: Passing NULL to account_steal_time()
++	 * ensures that the ticks are accounted as stolen.
++	 */
++	if ((stolen > 0) && (delta_cpu > 0)) {
++		delta_cpu -= stolen;
++		if (unlikely(delta_cpu < 0))
++			stolen += delta_cpu; /* clamp local-time progress */
++		do_div(stolen, NS_PER_TICK);
++		per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
++		per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
++		account_steal_time(NULL, (cputime_t)stolen);
++	}
 +
-+	if (!rcu_dereference(nmi_callback)(regs, cpu))
-+		default_do_nmi(regs);
++	/*
++	 * Account blocked ticks.
++	 * HACK: Passing idle_task to account_steal_time()
++	 * ensures that the ticks are accounted as idle/wait.
++	 */
++	if ((blocked > 0) && (delta_cpu > 0)) {
++		delta_cpu -= blocked;
++		if (unlikely(delta_cpu < 0))
++			blocked += delta_cpu; /* clamp local-time progress */
++		do_div(blocked, NS_PER_TICK);
++		per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
++		per_cpu(processed_system_time, cpu)  += blocked * NS_PER_TICK;
++		account_steal_time(idle_task(cpu), (cputime_t)blocked);
++	}
 +
-+	nmi_exit();
-+}
++	/* Account user/system ticks. */
++	if (delta_cpu > 0) {
++		do_div(delta_cpu, NS_PER_TICK);
++		per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
++		if (user_mode(get_irq_regs()))
++			account_user_time(current, (cputime_t)delta_cpu);
++		else
++			account_system_time(current, HARDIRQ_OFFSET,
++					    (cputime_t)delta_cpu);
++	}
 +
-+void set_nmi_callback(nmi_callback_t callback)
-+{
-+	vmalloc_sync_all();
-+	rcu_assign_pointer(nmi_callback, callback);
-+}
-+EXPORT_SYMBOL_GPL(set_nmi_callback);
++	/* Local timer processing (see update_process_times()). */
++	run_local_timers();
++	if (rcu_pending(cpu))
++		rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
++	scheduler_tick();
++	run_posix_cpu_timers(current);
 +
-+void unset_nmi_callback(void)
-+{
-+	nmi_callback = dummy_nmi_callback;
++	return IRQ_HANDLED;
 +}
-+EXPORT_SYMBOL_GPL(unset_nmi_callback);
 +
-+#ifdef CONFIG_KPROBES
-+fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
++static void init_missing_ticks_accounting(int cpu)
 +{
-+	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-+			== NOTIFY_STOP)
-+		return;
-+	/* This is an interrupt gate, because kprobes wants interrupts
-+	disabled.  Normal trap handlers don't. */
-+	restore_interrupts(regs);
-+	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
++	struct vcpu_register_runstate_memory_area area;
++	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++
++	memset(runstate, 0, sizeof(*runstate));
++
++	area.addr.v = runstate;
++	HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
++
++	per_cpu(processed_blocked_time, cpu) =
++		runstate->time[RUNSTATE_blocked];
++	per_cpu(processed_stolen_time, cpu) =
++		runstate->time[RUNSTATE_runnable] +
++		runstate->time[RUNSTATE_offline];
 +}
-+#endif
 +
-+/*
-+ * Our handling of the processor debug registers is non-trivial.
-+ * We do not clear them on entry and exit from the kernel. Therefore
-+ * it is possible to get a watchpoint trap here from inside the kernel.
-+ * However, the code in ./ptrace.c has ensured that the user can
-+ * only set watchpoints on userspace addresses. Therefore the in-kernel
-+ * watchpoint trap can only occur in code which is reading/writing
-+ * from user space. Such code must not hold kernel locks (since it
-+ * can equally take a page fault), therefore it is safe to call
-+ * force_sig_info even though that claims and releases locks.
-+ * 
-+ * Code in ./signal.c ensures that the debug control register
-+ * is restored before we deliver any signal, and therefore that
-+ * user code runs with the correct debug control register even though
-+ * we clear it here.
-+ *
-+ * Being careful here means that we don't have to be as careful in a
-+ * lot of more complicated places (task switching can be a bit lazy
-+ * about restoring all the debug state, and ptrace doesn't have to
-+ * find every occurrence of the TF bit that could be saved away even
-+ * by user code)
-+ */
-+fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
++/* not static: needed by APM */
++unsigned long get_cmos_time(void)
 +{
-+	unsigned int condition;
-+	struct task_struct *tsk = current;
++	unsigned long retval;
++	unsigned long flags;
 +
-+	get_debugreg(condition, 6);
++	spin_lock_irqsave(&rtc_lock, flags);
 +
-+	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
-+					SIGTRAP) == NOTIFY_STOP)
-+		return;
-+	/* It's safe to allow irq's after DR6 has been saved */
-+	if (regs->eflags & X86_EFLAGS_IF)
-+		local_irq_enable();
++	retval = get_wallclock();
 +
-+	/* Mask out spurious debug traps due to lazy DR7 setting */
-+	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
-+		if (!tsk->thread.debugreg[7])
-+			goto clear_dr7;
-+	}
++	spin_unlock_irqrestore(&rtc_lock, flags);
 +
-+	if (regs->eflags & VM_MASK)
-+		goto debug_vm86;
++	return retval;
++}
++EXPORT_SYMBOL(get_cmos_time);
 +
-+	/* Save debug status register where ptrace can see it */
-+	tsk->thread.debugreg[6] = condition;
++static void sync_cmos_clock(unsigned long dummy);
++
++static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
++
++static void sync_cmos_clock(unsigned long dummy)
++{
++	struct timeval now, next;
++	int fail = 1;
 +
 +	/*
-+	 * Single-stepping through TF: make sure we ignore any events in
-+	 * kernel space (but re-enable TF when returning to user mode).
++	 * If we have an externally synchronized Linux clock, then update
++	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
++	 * called as close as possible to 500 ms before the new second starts.
++	 * This code is run on a timer.  If the clock is set, that timer
++	 * may not expire at the correct time.  Thus, we adjust...
 +	 */
-+	if (condition & DR_STEP) {
++	if (!ntp_synced())
 +		/*
-+		 * We already checked v86 mode above, so we can
-+		 * check for kernel mode by just checking the CPL
-+		 * of CS.
++		 * Not synced, exit, do not restart a timer (if one is
++		 * running, let it run out).
 +		 */
-+		if (!user_mode(regs))
-+			goto clear_TF_reenable;
-+	}
++		return;
 +
-+	/* Ok, finally something we can handle */
-+	send_sigtrap(tsk, regs, error_code);
++	do_gettimeofday(&now);
++	if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
++	    now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
++		fail = set_rtc_mmss(now.tv_sec);
 +
-+	/* Disable additional traps. They'll be re-enabled when
-+	 * the signal is delivered.
-+	 */
-+clear_dr7:
-+	set_debugreg(0, 7);
-+	return;
++	next.tv_usec = USEC_AFTER - now.tv_usec;
++	if (next.tv_usec <= 0)
++		next.tv_usec += USEC_PER_SEC;
 +
-+debug_vm86:
-+	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
-+	return;
++	if (!fail)
++		next.tv_sec = 659;
++	else
++		next.tv_sec = 0;
 +
-+clear_TF_reenable:
-+	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
-+	regs->eflags &= ~TF_MASK;
-+	return;
++	if (next.tv_usec >= USEC_PER_SEC) {
++		next.tv_sec++;
++		next.tv_usec -= USEC_PER_SEC;
++	}
++	mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
 +}
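In other words: on success the timer re-arms roughly 659 seconds (about 11 minutes) later, on failure after about one second, and tv_usec is chosen so the callback lands close to the half-second point, i.e. ~500 ms before the next second boundary. A worked example of the re-arm arithmetic, with hypothetical values (USEC_AFTER = 500000 is an assumption, not shown in this excerpt):

/* Worked example of the sync_cmos_clock() re-arm computation. */
#include <stdio.h>

#define USEC_PER_SEC 1000000L
#define USEC_AFTER   500000L	/* assumption: 500 ms */

int main(void)
{
	long now_usec  = 731000;		/* e.g. now.tv_usec */
	long next_sec  = 659;			/* set_rtc_mmss() succeeded */
	long next_usec = USEC_AFTER - now_usec;	/* = -231000 */

	if (next_usec <= 0)
		next_usec += USEC_PER_SEC;	/* = 769000 */
	if (next_usec >= USEC_PER_SEC) {
		next_sec++;
		next_usec -= USEC_PER_SEC;
	}
	/* Fires in 659.769 s: 0.731 + 0.769 lands on the .500 mark
	 * of a second, about 11 minutes from now. */
	printf("re-arm in %ld.%06ld s\n", next_sec, next_usec);
	return 0;
}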
 +
-+/*
-+ * Note that we play around with the 'TS' bit in an attempt to get
-+ * the correct behaviour even in the presence of the asynchronous
-+ * IRQ13 behaviour
-+ */
-+void math_error(void __user *eip)
++void notify_arch_cmos_timer(void)
 +{
-+	struct task_struct * task;
-+	siginfo_t info;
-+	unsigned short cwd, swd;
++	mod_timer(&sync_cmos_timer, jiffies + 1);
++	mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
++}
 +
++static long clock_cmos_diff;
++static unsigned long sleep_start;
++
++static int timer_suspend(struct sys_device *dev, pm_message_t state)
++{
 +	/*
-+	 * Save the info for the exception handler and clear the error.
-+	 */
-+	task = current;
-+	save_init_fpu(task);
-+	task->thread.trap_no = 16;
-+	task->thread.error_code = 0;
-+	info.si_signo = SIGFPE;
-+	info.si_errno = 0;
-+	info.si_code = __SI_FAULT;
-+	info.si_addr = eip;
-+	/*
-+	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
-+	 * status.  0x3f is the exception bits in these regs, 0x200 is the
-+	 * C1 reg you need in case of a stack fault, 0x040 is the stack
-+	 * fault bit.  We should only be taking one exception at a time,
-+	 * so if this combination doesn't produce any single exception,
-+	 * then we have a bad program that isn't syncronizing its FPU usage
-+	 * and it will suffer the consequences since we won't be able to
-+	 * fully reproduce the context of the exception
++	 * Estimate time zone so that set_time can update the clock
 +	 */
-+	cwd = get_fpu_cwd(task);
-+	swd = get_fpu_swd(task);
-+	switch (swd & ~cwd & 0x3f) {
-+		case 0x000: /* No unmasked exception */
-+			return;
-+		default:    /* Multiple exceptions */
-+			break;
-+		case 0x001: /* Invalid Op */
-+			/*
-+			 * swd & 0x240 == 0x040: Stack Underflow
-+			 * swd & 0x240 == 0x240: Stack Overflow
-+			 * User must clear the SF bit (0x40) if set
-+			 */
-+			info.si_code = FPE_FLTINV;
-+			break;
-+		case 0x002: /* Denormalize */
-+		case 0x010: /* Underflow */
-+			info.si_code = FPE_FLTUND;
-+			break;
-+		case 0x004: /* Zero Divide */
-+			info.si_code = FPE_FLTDIV;
-+			break;
-+		case 0x008: /* Overflow */
-+			info.si_code = FPE_FLTOVF;
-+			break;
-+		case 0x020: /* Precision */
-+			info.si_code = FPE_FLTRES;
-+			break;
++	unsigned long ctime =  get_cmos_time();
++
++	clock_cmos_diff = -ctime;
++	clock_cmos_diff += get_seconds();
++	sleep_start = ctime;
++	return 0;
++}
++
++static int timer_resume(struct sys_device *dev)
++{
++	unsigned long flags;
++	unsigned long sec;
++	unsigned long ctime = get_cmos_time();
++	long sleep_length = (ctime - sleep_start) * HZ;
++
++	if (sleep_length < 0) {
++		printk(KERN_WARNING "CMOS clock skew detected in timer resume!\n");
++		/* The time after the resume must not be earlier than the time
++		 * before the suspend or some nasty things will happen
++		 */
++		sleep_length = 0;
++		ctime = sleep_start;
 +	}
-+	force_sig_info(SIGFPE, &info, task);
++
++#ifdef CONFIG_HPET_TIMER
++	if (is_hpet_enabled())
++		hpet_reenable();
++#endif
++	sec = ctime + clock_cmos_diff;
++	write_seqlock_irqsave(&xtime_lock, flags);
++	xtime.tv_sec = sec;
++	xtime.tv_nsec = 0;
++	jiffies_64 += sleep_length;
++	write_sequnlock_irqrestore(&xtime_lock, flags);
++	touch_softlockup_watchdog();
++	return 0;
 +}
 +
-+fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
++static struct sysdev_class timer_sysclass = {
++	.resume = timer_resume,
++	.suspend = timer_suspend,
++	set_kset_name("timer"),
++};
++
++
++/* XXX this driverfs stuff should probably go elsewhere later -john */
++static struct sys_device device_timer = {
++	.id	= 0,
++	.cls	= &timer_sysclass,
++};
++
++static int time_init_device(void)
 +{
-+	ignore_fpu_irq = 1;
-+	math_error((void __user *)regs->eip);
++	int error = sysdev_class_register(&timer_sysclass);
++	if (!error)
++		error = sysdev_register(&device_timer);
++	return error;
 +}
 +
-+static void simd_math_error(void __user *eip)
++device_initcall(time_init_device);
++
++#ifdef CONFIG_HPET_TIMER
++extern void (*late_time_init)(void);
++/* Duplicate of time_init() below, with hpet_enable part added */
++static void __init hpet_time_init(void)
 +{
-+	struct task_struct * task;
-+	siginfo_t info;
-+	unsigned short mxcsr;
++	xtime.tv_sec = get_cmos_time();
++	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
++	set_normalized_timespec(&wall_to_monotonic,
++		-xtime.tv_sec, -xtime.tv_nsec);
 +
-+	/*
-+	 * Save the info for the exception handler and clear the error.
-+	 */
-+	task = current;
-+	save_init_fpu(task);
-+	task->thread.trap_no = 19;
-+	task->thread.error_code = 0;
-+	info.si_signo = SIGFPE;
-+	info.si_errno = 0;
-+	info.si_code = __SI_FAULT;
-+	info.si_addr = eip;
-+	/*
-+	 * The SIMD FPU exceptions are handled a little differently, as there
-+	 * is only a single status/control register.  Thus, to determine which
-+	 * unmasked exception was caught we must mask the exception mask bits
-+	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-+	 */
-+	mxcsr = get_fpu_mxcsr(task);
-+	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-+		case 0x000:
-+		default:
-+			break;
-+		case 0x001: /* Invalid Op */
-+			info.si_code = FPE_FLTINV;
-+			break;
-+		case 0x002: /* Denormalize */
-+		case 0x010: /* Underflow */
-+			info.si_code = FPE_FLTUND;
-+			break;
-+		case 0x004: /* Zero Divide */
-+			info.si_code = FPE_FLTDIV;
-+			break;
-+		case 0x008: /* Overflow */
-+			info.si_code = FPE_FLTOVF;
-+			break;
-+		case 0x020: /* Precision */
-+			info.si_code = FPE_FLTRES;
-+			break;
++	if ((hpet_enable() >= 0) && hpet_use_timer) {
++		printk("Using HPET for base-timer\n");
 +	}
-+	force_sig_info(SIGFPE, &info, task);
++
++	do_time_init();
 +}
++#endif
 +
-+fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
-+					  long error_code)
++/* Dynamically-mapped IRQ. */
++DEFINE_PER_CPU(int, timer_irq);
++
++extern void (*late_time_init)(void);
++static void setup_cpu0_timer_irq(void)
 +{
-+	if (cpu_has_xmm) {
-+		/* Handle SIMD FPU exceptions on PIII+ processors. */
-+		ignore_fpu_irq = 1;
-+		simd_math_error((void __user *)regs->eip);
-+	} else {
-+		/*
-+		 * Handle strange cache flush from user space exception
-+		 * in all other cases.  This is undocumented behaviour.
-+		 */
-+		if (regs->eflags & VM_MASK) {
-+			handle_vm86_fault((struct kernel_vm86_regs *)regs,
-+					  error_code);
-+			return;
-+		}
-+		current->thread.trap_no = 19;
-+		current->thread.error_code = error_code;
-+		die_if_kernel("cache flush denied", regs, error_code);
-+		force_sig(SIGSEGV, current);
-+	}
++	per_cpu(timer_irq, 0) =
++		bind_virq_to_irqhandler(
++			VIRQ_TIMER,
++			0,
++			timer_interrupt,
++			SA_INTERRUPT,
++			"timer0",
++			NULL);
++	BUG_ON(per_cpu(timer_irq, 0) < 0);
 +}
 +
-+#ifndef CONFIG_XEN
-+fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
-+					  long error_code)
++void __init time_init(void)
 +{
-+#if 0
-+	/* No need to warn about this any longer. */
-+	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
++#ifdef CONFIG_HPET_TIMER
++	if (is_hpet_capable()) {
++		/*
++		 * HPET initialization needs to do memory-mapped io. So, let
++		 * us do a late initialization after mem_init().
++		 */
++		late_time_init = hpet_time_init;
++		return;
++	}
 +#endif
++	get_time_values_from_xen();
++
++	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++	per_cpu(processed_system_time, 0) = processed_system_time;
++	init_missing_ticks_accounting(0);
++
++	update_wallclock();
++
++	init_cpu_khz();
++	printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
++	       cpu_khz / 1000, cpu_khz % 1000);
++
++	/* Cannot request_irq() until kmem is initialised. */
++	late_time_init = setup_cpu0_timer_irq;
 +}
 +
-+fastcall void setup_x86_bogus_stack(unsigned char * stk)
++/* Convert jiffies to system time. */
++u64 jiffies_to_st(unsigned long j)
 +{
-+	unsigned long *switch16_ptr, *switch32_ptr;
-+	struct pt_regs *regs;
-+	unsigned long stack_top, stack_bot;
-+	unsigned short iret_frame16_off;
-+	int cpu = smp_processor_id();
-+	/* reserve the space on 32bit stack for the magic switch16 pointer */
-+	memmove(stk, stk + 8, sizeof(struct pt_regs));
-+	switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
-+	regs = (struct pt_regs *)stk;
-+	/* now the switch32 on 16bit stack */
-+	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
-+	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
-+	switch32_ptr = (unsigned long *)(stack_top - 8);
-+	iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
-+	/* copy iret frame on 16bit stack */
-+	memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
-+	/* fill in the switch pointers */
-+	switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
-+	switch16_ptr[1] = __ESPFIX_SS;
-+	switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
-+		8 - CPU_16BIT_STACK_SIZE;
-+	switch32_ptr[1] = __KERNEL_DS;
-+}
-+
-+fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
-+{
-+	unsigned long *switch32_ptr;
-+	unsigned char *stack16, *stack32;
-+	unsigned long stack_top, stack_bot;
-+	int len;
-+	int cpu = smp_processor_id();
-+	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
-+	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
-+	switch32_ptr = (unsigned long *)(stack_top - 8);
-+	/* copy the data from 16bit stack to 32bit stack */
-+	len = CPU_16BIT_STACK_SIZE - 8 - sp;
-+	stack16 = (unsigned char *)(stack_bot + sp);
-+	stack32 = (unsigned char *)
-+		(switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
-+	memcpy(stack32, stack16, len);
-+	return stack32;
++	unsigned long seq;
++	long delta;
++	u64 st;
++
++	do {
++		seq = read_seqbegin(&xtime_lock);
++		delta = j - jiffies;
++		if (delta < 1) {
++			/* Triggers in some wrap-around cases, but that's okay:
++			 * we just end up with a shorter timeout. */
++			st = processed_system_time + NS_PER_TICK;
++		} else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) {
++			/* Very long timeout means there is no pending timer.
++			 * We indicate this to Xen by passing zero timeout. */
++			st = 0;
++		} else {
++			st = processed_system_time + delta * (u64)NS_PER_TICK;
++		}
++	} while (read_seqretry(&xtime_lock, seq));
++
++	return st;
 +}
-+#endif
++EXPORT_SYMBOL(jiffies_to_st);
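jiffies_to_st() thus encodes three cases for Xen: an already-expired jiffy becomes the shortest meaningful timeout (one tick past the last processed time), a timeout too far out to matter becomes the sentinel 0 ("no pending timer"), and anything else is plain tick-to-nanosecond scaling. A user-space sketch of the same case analysis (NS_PER_TICK and the processed timestamp are stand-ins, assuming HZ=100):

/* Sketch of the jiffies_to_st() decision, outside the seqlock loop. */
#include <stdint.h>

#define NS_PER_TICK 10000000ULL		/* assumption: HZ=100 */

static uint64_t to_system_time(long delta, uint64_t processed)
{
	if (delta < 1)				/* past or wrapped: soonest */
		return processed + NS_PER_TICK;
	if ((unsigned long)delta >> (sizeof(long) * 8 - 3))
		return 0;			/* "no pending timer" sentinel */
	return processed + delta * NS_PER_TICK;	/* normal scaling */
}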
 +
 +/*
-+ *  'math_state_restore()' saves the current math information in the
-+ * old math state array, and gets the new ones from the current task
-+ *
-+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
-+ * Don't touch unless you *really* know how it works.
-+ *
-+ * Must be called with kernel preemption disabled (in this case,
-+ * local interrupts are disabled at the call-site in entry.S).
++ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
++ * These functions are based on implementations from arch/s390/kernel/time.c
 + */
-+asmlinkage void math_state_restore(struct pt_regs regs)
++static void stop_hz_timer(void)
 +{
-+	struct thread_info *thread = current_thread_info();
-+	struct task_struct *tsk = thread->task;
++	unsigned int cpu = smp_processor_id();
++	unsigned long j;
 +
-+	/* NB. 'clts' is done for us by Xen during virtual trap. */
-+	if (!tsk_used_math(tsk))
-+		init_fpu(tsk);
-+	restore_fpu(tsk);
-+	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
-+}
++	cpu_set(cpu, nohz_cpu_mask);
 +
-+#ifndef CONFIG_MATH_EMULATION
++	/* See matching smp_mb in rcu_start_batch in rcupdate.c.  These mbs  */
++	/* ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a  */
++	/* value of rcp->cur that matches rdp->quiescbatch and allows us to  */
++	/* stop the hz timer then the cpumasks created for subsequent values */
++	/* of cur in rcu_start_batch are guaranteed to pick up the updated   */
++	/* nohz_cpu_mask and so will not depend on this cpu.                 */
 +
-+asmlinkage void math_emulate(long arg)
-+{
-+	printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
-+	printk(KERN_EMERG "killing %s.\n",current->comm);
-+	force_sig(SIGFPE,current);
-+	schedule();
-+}
++	smp_mb();
 +
-+#endif /* CONFIG_MATH_EMULATION */
++	/* Leave ourselves in tick mode if rcu or softirq or timer pending. */
++	if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
++	    (j = next_timer_interrupt(), time_before_eq(j, jiffies))) {
++		cpu_clear(cpu, nohz_cpu_mask);
++		j = jiffies + 1;
++	}
 +
-+#ifdef CONFIG_X86_F00F_BUG
-+void __init trap_init_f00f_bug(void)
-+{
-+	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
++	if (HYPERVISOR_set_timer_op(jiffies_to_st(j)) != 0)
++		BUG();
++}
 +
-+	/*
-+	 * Update the IDT descriptor and reload the IDT so that
-+	 * it uses the read-only mapped virtual address.
-+	 */
-+	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-+	load_idt(&idt_descr);
++static void start_hz_timer(void)
++{
++	cpu_clear(smp_processor_id(), nohz_cpu_mask);
 +}
-+#endif
 +
++void raw_safe_halt(void)
++{
++	stop_hz_timer();
++	/* Blocking includes an implicit local_irq_enable(). */
++	HYPERVISOR_block();
++	start_hz_timer();
++}
++EXPORT_SYMBOL(raw_safe_halt);
 +
-+/*
-+ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
-+ * for those that specify <dpl>|4 in the second field.
-+ */
-+static trap_info_t trap_table[] = {
-+	{  0, 0, __KERNEL_CS, (unsigned long)divide_error		},
-+	{  1, 0|4, __KERNEL_CS, (unsigned long)debug			},
-+	{  3, 3|4, __KERNEL_CS, (unsigned long)int3			},
-+	{  4, 3, __KERNEL_CS, (unsigned long)overflow			},
-+	{  5, 0, __KERNEL_CS, (unsigned long)bounds			},
-+	{  6, 0, __KERNEL_CS, (unsigned long)invalid_op			},
-+	{  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available	},
-+	{  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
-+	{ 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS		},
-+	{ 11, 0, __KERNEL_CS, (unsigned long)segment_not_present	},
-+	{ 12, 0, __KERNEL_CS, (unsigned long)stack_segment		},
-+	{ 13, 0, __KERNEL_CS, (unsigned long)general_protection		},
-+	{ 14, 0|4, __KERNEL_CS, (unsigned long)page_fault		},
-+	{ 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment		},
-+	{ 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error		},
-+	{ 17, 0, __KERNEL_CS, (unsigned long)alignment_check		},
-+#ifdef CONFIG_X86_MCE
-+	{ 18, 0, __KERNEL_CS, (unsigned long)machine_check		},
-+#endif
-+	{ 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error	},
-+	{ SYSCALL_VECTOR,  3, __KERNEL_CS, (unsigned long)system_call	},
-+	{  0, 0,	   0, 0						}
-+};
++void halt(void)
++{
++	if (irqs_disabled())
++		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
++}
++EXPORT_SYMBOL(halt);
 +
-+void __init trap_init(void)
++/* No locking required. We are only CPU running, and interrupts are off. */
++void time_resume(void)
 +{
-+	HYPERVISOR_set_trap_table(trap_table);
++	init_cpu_khz();
 +
-+	if (cpu_has_fxsr) {
-+		/*
-+		 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
-+		 * Generates a compile-time "error: zero width for bit-field" if
-+		 * the alignment is wrong.
-+		 */
-+		struct fxsrAlignAssert {
-+			int _:!(offsetof(struct task_struct,
-+					thread.i387.fxsave) & 15);
-+		};
++	get_time_values_from_xen();
 +
-+		printk(KERN_INFO "Enabling fast FPU save and restore... ");
-+		set_in_cr4(X86_CR4_OSFXSR);
-+		printk("done.\n");
-+	}
-+	if (cpu_has_xmm) {
-+		printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
-+				"support... ");
-+		set_in_cr4(X86_CR4_OSXMMEXCPT);
-+		printk("done.\n");
-+	}
++	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++	per_cpu(processed_system_time, 0) = processed_system_time;
++	init_missing_ticks_accounting(0);
 +
-+	/*
-+	 * Should be a barrier for any external CPU state.
-+	 */
-+	cpu_init();
++	update_wallclock();
 +}
 +
-+void smp_trap_init(trap_info_t *trap_ctxt)
++#ifdef CONFIG_SMP
++static char timer_name[NR_CPUS][15];
++
++void local_setup_timer(unsigned int cpu)
 +{
-+	trap_info_t *t = trap_table;
++	int seq;
 +
-+	for (t = trap_table; t->address; t++) {
-+		trap_ctxt[t->vector].flags = t->flags;
-+		trap_ctxt[t->vector].cs = t->cs;
-+		trap_ctxt[t->vector].address = t->address;
-+	}
++	BUG_ON(cpu == 0);
++
++	do {
++		seq = read_seqbegin(&xtime_lock);
++		/* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
++		per_cpu(processed_system_time, cpu) =
++			per_cpu(shadow_time, 0).system_timestamp;
++		init_missing_ticks_accounting(cpu);
++	} while (read_seqretry(&xtime_lock, seq));
++
++	sprintf(timer_name[cpu], "timer%d", cpu);
++	per_cpu(timer_irq, cpu) =
++		bind_virq_to_irqhandler(
++			VIRQ_TIMER,
++			cpu,
++			timer_interrupt,
++			SA_INTERRUPT,
++			timer_name[cpu],
++			NULL);
++	BUG_ON(per_cpu(timer_irq, cpu) < 0);
 +}
 +
-+static int __init kstack_setup(char *s)
++void local_teardown_timer(unsigned int cpu)
 +{
-+	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
-+	return 1;
++	BUG_ON(cpu == 0);
++	unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
 +}
-+__setup("kstack=", kstack_setup);
++#endif
 +
-+#ifdef CONFIG_STACK_UNWIND
-+static int __init call_trace_setup(char *s)
++/*
++ * /proc/sys/xen: This really belongs in another file. It can stay here for
++ * now however.
++ */
++static ctl_table xen_subtable[] = {
++	{
++		.ctl_name	= 1,
++		.procname	= "independent_wallclock",
++		.data		= &independent_wallclock,
++		.maxlen		= sizeof(independent_wallclock),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec
++	},
++	{
++		.ctl_name	= 2,
++		.procname	= "permitted_clock_jitter",
++		.data		= &permitted_clock_jitter,
++		.maxlen		= sizeof(permitted_clock_jitter),
++		.mode		= 0644,
++		.proc_handler	= proc_doulongvec_minmax
++	},
++	{ 0 }
++};
++static ctl_table xen_table[] = {
++	{
++		.ctl_name	= 123,
++		.procname	= "xen",
++		.mode		= 0555,
++		.child		= xen_subtable},
++	{ 0 }
++};
++static int __init xen_sysctl_init(void)
 +{
-+	if (strcmp(s, "old") == 0)
-+		call_trace = -1;
-+	else if (strcmp(s, "both") == 0)
-+		call_trace = 0;
-+	else if (strcmp(s, "newfallback") == 0)
-+		call_trace = 1;
-+	else if (strcmp(s, "new") == 2)
-+		call_trace = 2;
-+	return 1;
++	(void)register_sysctl_table(xen_table, 0);
++	return 0;
 +}
-+__setup("call_trace=", call_trace_setup);
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/tsc.c linux-2.6.18-xen/arch/i386/kernel/tsc.c
---- linux-2.6.18.3/arch/i386/kernel/tsc.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/kernel/tsc.c	2006-11-19 14:26:22.000000000 +0100
-@@ -101,6 +101,7 @@
- 	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
- }
- 
-+#ifndef CONFIG_XEN
- /*
-  * Scheduler clock - returns current time in nanosec units.
-  */
-@@ -124,6 +125,7 @@
- 	/* return the value in ns */
- 	return cycles_2_ns(this_offset);
- }
-+#endif
- 
- static unsigned long calculate_cpu_khz(void)
- {
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/vm86.c linux-2.6.18-xen/arch/i386/kernel/vm86.c
---- linux-2.6.18.3/arch/i386/kernel/vm86.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/kernel/vm86.c	2006-11-19 14:26:22.000000000 +0100
-@@ -97,7 +97,9 @@
- struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
- struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
- {
-+#ifndef CONFIG_X86_NO_TSS
- 	struct tss_struct *tss;
-+#endif
- 	struct pt_regs *ret;
- 	unsigned long tmp;
- 
-@@ -122,12 +124,16 @@
- 		do_exit(SIGSEGV);
- 	}
- 
-+#ifndef CONFIG_X86_NO_TSS
- 	tss = &per_cpu(init_tss, get_cpu());
-+#endif
- 	current->thread.esp0 = current->thread.saved_esp0;
- 	current->thread.sysenter_cs = __KERNEL_CS;
- 	load_esp0(tss, &current->thread);
- 	current->thread.saved_esp0 = 0;
-+#ifndef CONFIG_X86_NO_TSS
- 	put_cpu();
-+#endif
- 
- 	loadsegment(fs, current->thread.saved_fs);
- 	loadsegment(gs, current->thread.saved_gs);
-@@ -251,7 +257,9 @@
- 
- static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
++__initcall(xen_sysctl_init);
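The stolen/blocked/user accounting in timer_interrupt above repeats one clamp-and-convert pattern: reduce the local delta so local time never runs backwards, keep only whole ticks, and advance the bookkeeping counters by exactly those ticks. A minimal user-space sketch of that pattern (illustrative only, not part of the patch; NS_PER_TICK and the struct fields stand in for the per-CPU variables):

/* Sketch of the clamp-and-account pattern from timer_interrupt. */
#include <stdint.h>

#define NS_PER_TICK 1000000LL	/* assumption: HZ=1000, 1 ms ticks */

struct vcpu_acct {
	int64_t delta_cpu;	/* local nanoseconds not yet accounted */
	int64_t processed;	/* analogue of the processed_*_time counters */
};

/* Account 'ns' nanoseconds (stolen or blocked) in whole ticks. */
static int64_t account_ns(struct vcpu_acct *a, int64_t ns)
{
	if (ns <= 0 || a->delta_cpu <= 0)
		return 0;
	a->delta_cpu -= ns;
	if (a->delta_cpu < 0)
		ns += a->delta_cpu;		/* clamp local-time progress */
	ns /= NS_PER_TICK;			/* do_div() in the kernel code */
	a->processed += ns * NS_PER_TICK;	/* advance by whole ticks only */
	return ns;	/* tick count handed to account_*_time() */
}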
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
+--- a/arch/i386/kernel/traps.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/traps.c	2007-03-14 10:55:14.000000000 +0100
+@@ -646,18 +646,11 @@
+ static __kprobes void
+ io_check_error(unsigned char reason, struct pt_regs * regs)
  {
-+#ifndef CONFIG_X86_NO_TSS
- 	struct tss_struct *tss;
-+#endif
- 	long eax;
- /*
-  * make sure the vm86() system call doesn't try to do anything silly
-@@ -296,12 +304,16 @@
- 	savesegment(fs, tsk->thread.saved_fs);
- 	savesegment(gs, tsk->thread.saved_gs);
- 
-+#ifndef CONFIG_X86_NO_TSS
- 	tss = &per_cpu(init_tss, get_cpu());
-+#endif
- 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
- 	if (cpu_has_sep)
- 		tsk->thread.sysenter_cs = 0;
- 	load_esp0(tss, &tsk->thread);
-+#ifndef CONFIG_X86_NO_TSS
- 	put_cpu();
-+#endif
+-	unsigned long i;
+-
+ 	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+ 	show_registers(regs);
  
- 	tsk->thread.screen_bitmap = info->screen_bitmap;
- 	if (info->flags & VM86_SCREEN_BITMAP)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/vmlinux.lds.S linux-2.6.18-xen/arch/i386/kernel/vmlinux.lds.S
---- linux-2.6.18.3/arch/i386/kernel/vmlinux.lds.S	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/kernel/vmlinux.lds.S	2006-11-19 14:26:22.000000000 +0100
-@@ -13,6 +13,12 @@
- OUTPUT_ARCH(i386)
- ENTRY(phys_startup_32)
- jiffies = jiffies_64;
-+
-+PHDRS {
-+	text PT_LOAD FLAGS(5);	/* R_E */
-+	data PT_LOAD FLAGS(7);	/* RWE */
-+	note PT_NOTE FLAGS(4);	/* R__ */
-+}
- SECTIONS
- {
-   . = __KERNEL_START;
-@@ -26,7 +32,7 @@
- 	KPROBES_TEXT
- 	*(.fixup)
- 	*(.gnu.warning)
--	} = 0x9090
-+	} :text = 0x9090
- 
-   _etext = .;			/* End of text section */
- 
-@@ -48,7 +54,7 @@
-   .data : AT(ADDR(.data) - LOAD_OFFSET) {	/* Data */
- 	*(.data)
- 	CONSTRUCTORS
--	}
-+	} :data
- 
-   . = ALIGN(4096);
-   __nosave_begin = .;
-@@ -184,4 +190,6 @@
-   STABS_DEBUG
+ 	/* Re-enable the IOCK line, wait for a few seconds */
+-	reason = (reason & 0xf) | 8;
+-	outb(reason, 0x61);
+-	i = 2000;
+-	while (--i) udelay(1000);
+-	reason &= ~8;
+-	outb(reason, 0x61);
++	clear_io_check_error(reason);
+ }
  
-   DWARF_DEBUG
+ static __kprobes void
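The hunk above replaces the open-coded IOCK recovery with a single call; judging from the removed lines, the factored-out helper presumably looks like the sketch below (an assumption — clear_io_check_error() itself is defined elsewhere and is not shown in this excerpt):

/* Sketch of clear_io_check_error(), assuming it keeps the removed logic. */
static void clear_io_check_error(unsigned char reason)
{
	unsigned long i;

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	i = 2000;
	while (--i)
		udelay(1000);
	reason &= ~8;
	outb(reason, 0x61);
}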
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/traps-xen.c b/arch/i386/kernel/traps-xen.c
+--- a/arch/i386/kernel/traps-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/traps-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,1132 @@
++/*
++ *  linux/arch/i386/traps.c
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *
++ *  Pentium III FXSR, SSE support
++ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ */
++
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'asm.s'.
++ */
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/highmem.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/kprobes.h>
++#include <linux/kexec.h>
++#include <linux/unwind.h>
++#include <linux/uaccess.h>
++#include <linux/nmi.h>
++#include <linux/bug.h>
 +
-+  NOTES
- }
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/kernel/vsyscall-note-xen.S linux-2.6.18-xen/arch/i386/kernel/vsyscall-note-xen.S
---- linux-2.6.18.3/arch/i386/kernel/vsyscall-note-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/kernel/vsyscall-note-xen.S	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,32 @@
-+/*
-+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
-+ * Here we can supply some information useful to userland.
-+ * First we get the vanilla i386 note that supplies the kernel version info.
-+ */
++#ifdef CONFIG_EISA
++#include <linux/ioport.h>
++#include <linux/eisa.h>
++#endif
 +
-+#include "vsyscall-note.S"
++#ifdef CONFIG_MCA
++#include <linux/mca.h>
++#endif
 +
-+/*
-+ * Now we add a special note telling glibc's dynamic linker a fake hardware
-+ * flavor that it will use to choose the search path for libraries in the
-+ * same way it uses real hardware capabilities like "mmx".
-+ * We supply "nosegneg" as the fake capability, to indicate that we
-+ * do not like negative offsets in instructions using segment overrides,
-+ * since we implement those inefficiently.  This makes it possible to
-+ * install libraries optimized to avoid those access patterns in someplace
-+ * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/file
-+ * corresponding to the bits here is needed to make ldconfig work right.
-+ * It should contain:
-+ *	hwcap 0 nosegneg
-+ * to match the mapping of bit to name that we give here.
-+ */
-+#define NOTE_KERNELCAP_BEGIN(ncaps, mask) \
-+	ASM_ELF_NOTE_BEGIN(".note.kernelcap", "a", "GNU", 2) \
-+	.long ncaps, mask
-+#define NOTE_KERNELCAP(bit, name) \
-+	.byte bit; .asciz name
-+#define NOTE_KERNELCAP_END ASM_ELF_NOTE_END
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/nmi.h>
++#include <asm/unwind.h>
++#include <asm/smp.h>
++#include <asm/arch_hooks.h>
++#include <asm/kdebug.h>
++#include <asm/stacktrace.h>
 +
-+NOTE_KERNELCAP_BEGIN(1, 1)
-+NOTE_KERNELCAP(1, "nosegneg")  /* Change 1 back to 0 when glibc is fixed! */
-+NOTE_KERNELCAP_END
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/lib/delay.c linux-2.6.18-xen/arch/i386/lib/delay.c
---- linux-2.6.18.3/arch/i386/lib/delay.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/lib/delay.c	2006-11-19 14:26:22.000000000 +0100
-@@ -61,6 +61,7 @@
- 	delay_fn = delay_tsc;
- }
- 
-+#ifndef CONFIG_X86_XEN
- int read_current_timer(unsigned long *timer_val)
- {
- 	if (delay_fn == delay_tsc) {
-@@ -69,7 +70,7 @@
- 	}
- 	return -1;
- }
--
-+#endif
- void __delay(unsigned long loops)
- {
- 	delay_fn(loops);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/mach-xen/irqflags.c linux-2.6.18-xen/arch/i386/mach-xen/irqflags.c
---- linux-2.6.18.3/arch/i386/mach-xen/irqflags.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/mach-xen/irqflags.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,99 @@
 +#include <linux/module.h>
-+#include <linux/smp.h>
-+#include <asm/irqflags.h>
-+#include <asm/hypervisor.h>
-+
-+/* interrupt control.. */
 +
-+/* 
-+ * The use of 'barrier' in the following reflects their use as local-lock
-+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
-+ * critical operations are executed. All critical operations must complete
-+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
-+ * includes these barriers, for example.
-+ */
++#include "mach_traps.h"
 +
-+unsigned long __raw_local_save_flags(void)
-+{
-+	struct vcpu_info *_vcpu;
-+	unsigned long flags;
++int panic_on_unrecovered_nmi;
 +
-+	preempt_disable();
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
-+	flags = _vcpu->evtchn_upcall_mask;
-+	preempt_enable();
++asmlinkage int system_call(void);
 +
-+	return flags;
-+}
-+EXPORT_SYMBOL(__raw_local_save_flags);
++/* Do we ignore FPU interrupts ? */
++char ignore_fpu_irq = 0;
 +
-+void raw_local_irq_restore(unsigned long flags)
-+{
-+	struct vcpu_info *_vcpu;
++#ifndef CONFIG_X86_NO_IDT
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
++#endif
 +
-+	preempt_disable();
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
-+	if ((_vcpu->evtchn_upcall_mask = flags) == 0) {
-+		barrier(); /* unmask then check (avoid races) */
-+		if (unlikely(_vcpu->evtchn_upcall_pending))
-+			force_evtchn_callback();
-+		preempt_enable();
-+	} else
-+		preempt_enable_no_resched();
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void alignment_check(void);
++#ifndef CONFIG_XEN
++asmlinkage void spurious_interrupt_bug(void);
++#else
++asmlinkage void fixup_4gb_segment(void);
++#endif
++asmlinkage void machine_check(void);
 +
-+}
-+EXPORT_SYMBOL(raw_local_irq_restore);
++int kstack_depth_to_print = 24;
++ATOMIC_NOTIFIER_HEAD(i386die_chain);
 +
-+void raw_local_irq_disable(void)
++int register_die_notifier(struct notifier_block *nb)
 +{
-+	struct vcpu_info *_vcpu;
-+
-+	preempt_disable();
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
-+	_vcpu->evtchn_upcall_mask = 1;
-+	preempt_enable_no_resched();
++	vmalloc_sync_all();
++	return atomic_notifier_chain_register(&i386die_chain, nb);
 +}
-+EXPORT_SYMBOL(raw_local_irq_disable);
++EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
 +
-+void raw_local_irq_enable(void)
++int unregister_die_notifier(struct notifier_block *nb)
 +{
-+	struct vcpu_info *_vcpu;
-+
-+	preempt_disable();
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
-+	_vcpu->evtchn_upcall_mask = 0;
-+	barrier(); /* unmask then check (avoid races) */
-+	if (unlikely(_vcpu->evtchn_upcall_pending))
-+		force_evtchn_callback();
-+	preempt_enable();
++	return atomic_notifier_chain_unregister(&i386die_chain, nb);
 +}
-+EXPORT_SYMBOL(raw_local_irq_enable);
++EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
 +
-+/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
-+int raw_irqs_disabled(void)
++static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
 +{
-+	struct vcpu_info *_vcpu;
-+	int disabled;
-+
-+	preempt_disable();
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
-+	disabled = (_vcpu->evtchn_upcall_mask != 0);
-+	preempt_enable_no_resched();
-+	return disabled;
++	return	p > (void *)tinfo &&
++		p < (void *)tinfo + THREAD_SIZE - 3;
 +}
-+EXPORT_SYMBOL(raw_irqs_disabled);
 +
-+unsigned long __raw_local_irq_save(void)
++static inline unsigned long print_context_stack(struct thread_info *tinfo,
++				unsigned long *stack, unsigned long ebp,
++				struct stacktrace_ops *ops, void *data)
 +{
-+	struct vcpu_info *_vcpu;
-+	unsigned long flags;
-+
-+	preempt_disable();
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
-+	flags = _vcpu->evtchn_upcall_mask;
-+	_vcpu->evtchn_upcall_mask = 1;
-+	preempt_enable_no_resched();
++	unsigned long addr;
 +
-+	return flags;
++#ifdef	CONFIG_FRAME_POINTER
++	while (valid_stack_ptr(tinfo, (void *)ebp)) {
++		unsigned long new_ebp;
++		addr = *(unsigned long *)(ebp + 4);
++		ops->address(data, addr);
++		/*
++		 * break out of recursive entries (such as
++		 * end_of_stack_stop_unwind_function). Also,
++		 * we can never allow a frame pointer to
++		 * move downwards!
++	 	 */
++	 	new_ebp = *(unsigned long *)ebp;
++		if (new_ebp <= ebp)
++			break;
++		ebp = new_ebp;
++	}
++#else
++	while (valid_stack_ptr(tinfo, stack)) {
++		addr = *stack++;
++		if (__kernel_text_address(addr))
++			ops->address(data, addr);
++	}
++#endif
++	return ebp;
 +}
-+EXPORT_SYMBOL(__raw_local_irq_save);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/mach-xen/Makefile linux-2.6.18-xen/arch/i386/mach-xen/Makefile
---- linux-2.6.18.3/arch/i386/mach-xen/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/mach-xen/Makefile	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,5 @@
-+#
-+# Makefile for the linux kernel.
-+#
 +
-+obj-y				:= setup.o irqflags.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/mach-xen/setup.c linux-2.6.18-xen/arch/i386/mach-xen/setup.c
---- linux-2.6.18.3/arch/i386/mach-xen/setup.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/mach-xen/setup.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,154 @@
-+/*
-+ *	Machine specific setup for generic
-+ */
++#define MSG(msg) ops->warning(data, msg)
 +
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/interrupt.h>
-+#include <linux/module.h>
-+#include <asm/acpi.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/e820.h>
-+#include <asm/setup.h>
-+#include <asm/fixmap.h>
++void dump_trace(struct task_struct *task, struct pt_regs *regs,
++	        unsigned long *stack,
++		struct stacktrace_ops *ops, void *data)
++{
++	unsigned long ebp = 0;
 +
-+#include <xen/interface/callback.h>
-+#include <xen/interface/memory.h>
++	if (!task)
++		task = current;
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+#define DEFAULT_SEND_IPI	(1)
-+#else
-+#define DEFAULT_SEND_IPI	(0)
++	if (!stack) {
++		unsigned long dummy;
++		stack = &dummy;
++		if (task && task != current)
++			stack = (unsigned long *)task->thread.esp;
++	}
++
++#ifdef CONFIG_FRAME_POINTER
++	if (!ebp) {
++		if (task == current) {
++			/* Grab ebp right from our regs */
++			asm ("movl %%ebp, %0" : "=r" (ebp) : );
++		} else {
++			/* ebp is the last reg pushed by switch_to */
++			ebp = *(unsigned long *) task->thread.esp;
++		}
++	}
 +#endif
 +
-+int no_broadcast=DEFAULT_SEND_IPI;
++	while (1) {
++		struct thread_info *context;
++		context = (struct thread_info *)
++			((unsigned long)stack & (~(THREAD_SIZE - 1)));
++		ebp = print_context_stack(context, stack, ebp, ops, data);
++		/* Should be after the line below, but somewhere
++		   in early boot context comes out corrupted and we
++		   can't reference it -AK */
++		if (ops->stack(data, "IRQ") < 0)
++			break;
++		stack = (unsigned long*)context->previous_esp;
++		if (!stack)
++			break;
++		touch_nmi_watchdog();
++	}
++}
++EXPORT_SYMBOL(dump_trace);
 +
-+static __init int no_ipi_broadcast(char *str)
++static void
++print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
 +{
-+	get_option(&str, &no_broadcast);
-+	printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
-+											"IPI Broadcast");
-+	return 1;
++	printk(data);
++	print_symbol(msg, symbol);
++	printk("\n");
 +}
 +
-+__setup("no_ipi_broadcast", no_ipi_broadcast);
++static void print_trace_warning(void *data, char *msg)
++{
++	printk("%s%s\n", (char *)data, msg);
++}
 +
-+static int __init print_ipi_mode(void)
++static int print_trace_stack(void *data, char *name)
 +{
-+	printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
-+											"Shortcut");
 +	return 0;
 +}
 +
-+late_initcall(print_ipi_mode);
-+
-+/**
-+ * machine_specific_memory_setup - Hook for machine specific memory setup.
-+ *
-+ * Description:
-+ *	This is included late in kernel/setup.c so that it can make
-+ *	use of all of the static functions.
-+ **/
-+
-+char * __init machine_specific_memory_setup(void)
++/*
++ * Print one address/symbol entry per line.
++ */
++static void print_trace_address(void *data, unsigned long addr)
 +{
-+	int rc;
-+	struct xen_memory_map memmap;
-+	/*
-+	 * This is rather large for a stack variable but this early in
-+	 * the boot process we know we have plenty slack space.
-+	 */
-+	struct e820entry map[E820MAX];
-+
-+	memmap.nr_entries = E820MAX;
-+	set_xen_guest_handle(memmap.buffer, map);
-+
-+	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
-+	if ( rc == -ENOSYS ) {
-+		memmap.nr_entries = 1;
-+		map[0].addr = 0ULL;
-+		map[0].size = PFN_PHYS(xen_start_info->nr_pages);
-+		/* 8MB slack (to balance backend allocations). */
-+		map[0].size += 8ULL << 20;
-+		map[0].type = E820_RAM;
-+		rc = 0;
-+	}
-+	BUG_ON(rc);
-+
-+	sanitize_e820_map(map, (char *)&memmap.nr_entries);
-+
-+	BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
-+
-+	return "Xen";
++	printk("%s [<%08lx>] ", (char *)data, addr);
++	print_symbol("%s\n", addr);
 +}
 +
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void nmi(void);
-+
-+unsigned long *machine_to_phys_mapping;
-+EXPORT_SYMBOL(machine_to_phys_mapping);
-+unsigned int machine_to_phys_order;
-+EXPORT_SYMBOL(machine_to_phys_order);
++static struct stacktrace_ops print_trace_ops = {
++	.warning = print_trace_warning,
++	.warning_symbol = print_trace_warning_symbol,
++	.stack = print_trace_stack,
++	.address = print_trace_address,
++};
 +
-+void __init machine_specific_arch_setup(void)
++static void
++show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
++		   unsigned long * stack, char *log_lvl)
 +{
-+	int ret;
-+	struct xen_machphys_mapping mapping;
-+	unsigned long machine_to_phys_nr_ents;
-+	struct xen_platform_parameters pp;
-+	struct callback_register event = {
-+		.type = CALLBACKTYPE_event,
-+		.address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
-+	};
-+	struct callback_register failsafe = {
-+		.type = CALLBACKTYPE_failsafe,
-+		.address = { __KERNEL_CS, (unsigned long)failsafe_callback },
-+	};
-+	struct callback_register nmi_cb = {
-+		.type = CALLBACKTYPE_nmi,
-+		.address = { __KERNEL_CS, (unsigned long)nmi },
-+	};
++	dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
++	printk("%s =======================\n", log_lvl);
++}
 +
-+	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
-+	if (ret == 0)
-+		ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
-+	if (ret == -ENOSYS)
-+		ret = HYPERVISOR_set_callbacks(
-+			event.address.cs, event.address.eip,
-+			failsafe.address.cs, failsafe.address.eip);
-+	BUG_ON(ret);
++void show_trace(struct task_struct *task, struct pt_regs *regs,
++		unsigned long * stack)
++{
++	show_trace_log_lvl(task, regs, stack, "");
++}
 +
-+	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
-+	if (ret == -ENOSYS) {
-+		struct xennmi_callback cb;
++static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
++			       unsigned long *esp, char *log_lvl)
++{
++	unsigned long *stack;
++	int i;
 +
-+		cb.handler_address = nmi_cb.address.eip;
-+		HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++	if (esp == NULL) {
++		if (task)
++			esp = (unsigned long*)task->thread.esp;
++		else
++			esp = (unsigned long *)&esp;
 +	}
 +
-+	if (HYPERVISOR_xen_version(XENVER_platform_parameters,
-+				   &pp) == 0)
-+		set_fixaddr_top(pp.virt_start - PAGE_SIZE);
-+
-+	machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
-+	machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
-+	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
-+		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
-+		machine_to_phys_nr_ents = mapping.max_mfn + 1;
++	stack = esp;
++	for(i = 0; i < kstack_depth_to_print; i++) {
++		if (kstack_end(stack))
++			break;
++		if (i && ((i % 8) == 0))
++			printk("\n%s       ", log_lvl);
++		printk("%08lx ", *stack++);
 +	}
-+	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
-+		machine_to_phys_order++;
++	printk("\n%sCall Trace:\n", log_lvl);
++	show_trace_log_lvl(task, regs, esp, log_lvl);
 +}
 +
-+/**
-+ * pre_setup_arch_hook - hook called prior to any setup_arch() execution
-+ *
-+ * Description:
-+ *	generally used to activate any machine specific identification
-+ *	routines that may be needed before setup_arch() runs.  On VISWS
-+ *	this is used to get the board revision and type.
-+ **/
-+void __init pre_setup_arch_hook(void)
++void show_stack(struct task_struct *task, unsigned long *esp)
 +{
++	printk("       ");
++	show_stack_log_lvl(task, NULL, esp, "");
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/Makefile linux-2.6.18-xen/arch/i386/Makefile
---- linux-2.6.18.3/arch/i386/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/Makefile	2006-11-19 14:26:21.000000000 +0100
-@@ -48,6 +48,11 @@
- 
- CFLAGS += $(cflags-y)
- 
-+cppflags-$(CONFIG_XEN) += \
-+	-D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
 +
-+CPPFLAGS += $(cppflags-y)
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
++{
++	unsigned long stack;
 +
- # Default subarch .c files
- mcore-y  := mach-default
- 
-@@ -71,6 +76,10 @@
- mflags-$(CONFIG_X86_SUMMIT) := -Iinclude/asm-i386/mach-summit
- mcore-$(CONFIG_X86_SUMMIT)  := mach-default
- 
-+# Xen subarch support
-+mflags-$(CONFIG_X86_XEN)	:= -Iinclude/asm-i386/mach-xen
-+mcore-$(CONFIG_X86_XEN)		:= mach-xen
++	show_trace(current, NULL, &stack);
++}
 +
- # generic subarchitecture
- mflags-$(CONFIG_X86_GENERICARCH) := -Iinclude/asm-i386/mach-generic
- mcore-$(CONFIG_X86_GENERICARCH) := mach-default
-@@ -105,6 +114,19 @@
- PHONY += zImage bzImage compressed zlilo bzlilo \
-          zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
- 
-+ifdef CONFIG_XEN
-+CPPFLAGS := -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
-+head-y := arch/i386/kernel/head-xen.o arch/i386/kernel/init_task-xen.o
-+boot := arch/i386/boot-xen
-+.PHONY: vmlinuz
-+all: vmlinuz
++EXPORT_SYMBOL(dump_stack);
 +
-+vmlinuz: vmlinux
-+	$(Q)$(MAKE) $(build)=$(boot) $@
++void show_registers(struct pt_regs *regs)
++{
++	int i;
++	int in_kernel = 1;
++	unsigned long esp;
++	unsigned short ss;
 +
-+install:
-+	$(Q)$(MAKE) $(build)=$(boot) XENGUEST=$(XENGUEST) $@
-+else
- all: bzImage
- 
- # KBUILD_IMAGE specify target image being built
-@@ -127,6 +149,7 @@
- 
- install:
- 	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
-+endif
- 
- archclean:
- 	$(Q)$(MAKE) $(clean)=arch/i386/boot
-@@ -145,3 +168,4 @@
- CLEAN_FILES += arch/$(ARCH)/boot/fdimage \
- 	       arch/$(ARCH)/boot/image.iso \
- 	       arch/$(ARCH)/boot/mtools.conf
-+CLEAN_FILES += vmlinuz vmlinux-stripped
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/mm/fault-xen.c linux-2.6.18-xen/arch/i386/mm/fault-xen.c
---- linux-2.6.18.3/arch/i386/mm/fault-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/mm/fault-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,770 @@
-+/*
-+ *  linux/arch/i386/mm/fault.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ */
++	esp = (unsigned long) (&regs->esp);
++	savesegment(ss, ss);
++	if (user_mode_vm(regs)) {
++		in_kernel = 0;
++		esp = regs->esp;
++		ss = regs->xss & 0xffff;
++	}
++	print_modules();
++	printk(KERN_EMERG "CPU:    %d\n"
++		KERN_EMERG "EIP:    %04x:[<%08lx>]    %s VLI\n"
++		KERN_EMERG "EFLAGS: %08lx   (%s %.*s)\n",
++		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
++		print_tainted(), regs->eflags, init_utsname()->release,
++		(int)strcspn(init_utsname()->version, " "),
++		init_utsname()->version);
++	print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
++	printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
++		regs->eax, regs->ebx, regs->ecx, regs->edx);
++	printk(KERN_EMERG "esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
++		regs->esi, regs->edi, regs->ebp, esp);
++	printk(KERN_EMERG "ds: %04x   es: %04x   ss: %04x\n",
++		regs->xds & 0xffff, regs->xes & 0xffff, ss);
++	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
++		TASK_COMM_LEN, current->comm, current->pid,
++		current_thread_info(), current, current->thread_info);
++	/*
++	 * When in-kernel, we also print out the stack and code at the
++	 * time of the fault..
++	 */
++	if (in_kernel) {
++		u8 *eip;
++		int code_bytes = 64;
++		unsigned char c;
 +
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/tty.h>
-+#include <linux/vt_kern.h>		/* For unblank_screen() */
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+#include <linux/kprobes.h>
++		printk("\n" KERN_EMERG "Stack: ");
++		show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
 +
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/desc.h>
-+#include <asm/kdebug.h>
++		printk(KERN_EMERG "Code: ");
 +
-+extern void die(const char *,struct pt_regs *,long);
++		eip = (u8 *)regs->eip - 43;
++		if (eip < (u8 *)PAGE_OFFSET ||
++			probe_kernel_address(eip, c)) {
++			/* try starting at EIP */
++			eip = (u8 *)regs->eip;
++			code_bytes = 32;
++		}
++		for (i = 0; i < code_bytes; i++, eip++) {
++			if (eip < (u8 *)PAGE_OFFSET ||
++				probe_kernel_address(eip, c)) {
++				printk(" Bad EIP value.");
++				break;
++			}
++			if (eip == (u8 *)regs->eip)
++				printk("<%02x> ", c);
++			else
++				printk("%02x ", c);
++		}
++	}
++	printk("\n");
++}	
 +
-+#ifdef CONFIG_KPROBES
-+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-+int register_page_fault_notifier(struct notifier_block *nb)
++int is_valid_bugaddr(unsigned long eip)
 +{
-+	vmalloc_sync_all();
-+	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-+}
++	unsigned short ud2;
 +
-+int unregister_page_fault_notifier(struct notifier_block *nb)
-+{
-+	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
-+}
++	if (eip < PAGE_OFFSET)
++		return 0;
++	if (probe_kernel_address((unsigned short *)eip, ud2))
++		return 0;
 +
-+static inline int notify_page_fault(enum die_val val, const char *str,
-+			struct pt_regs *regs, long err, int trap, int sig)
-+{
-+	struct die_args args = {
-+		.regs = regs,
-+		.str = str,
-+		.err = err,
-+		.trapnr = trap,
-+		.signr = sig
-+	};
-+	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
-+}
-+#else
-+static inline int notify_page_fault(enum die_val val, const char *str,
-+			struct pt_regs *regs, long err, int trap, int sig)
-+{
-+	return NOTIFY_DONE;
++	return ud2 == 0x0b0f;
 +}
-+#endif
 +
 +/*
-+ * Unlock any spinlocks which will prevent us from getting the
-+ * message out 
++ * This is gone through when something in the kernel has done something bad and
++ * is about to be terminated.
 + */
-+void bust_spinlocks(int yes)
++void die(const char * str, struct pt_regs * regs, long err)
 +{
-+	int loglevel_save = console_loglevel;
++	static struct {
++		spinlock_t lock;
++		u32 lock_owner;
++		int lock_owner_depth;
++	} die = {
++		.lock =			__SPIN_LOCK_UNLOCKED(die.lock),
++		.lock_owner =		-1,
++		.lock_owner_depth =	0
++	};
++	static int die_counter;
++	unsigned long flags;
 +
-+	if (yes) {
-+		oops_in_progress = 1;
-+		return;
++	oops_enter();
++
++	if (die.lock_owner != raw_smp_processor_id()) {
++		console_verbose();
++		spin_lock_irqsave(&die.lock, flags);
++		die.lock_owner = smp_processor_id();
++		die.lock_owner_depth = 0;
++		bust_spinlocks(1);
 +	}
-+#ifdef CONFIG_VT
-+	unblank_screen();
++	else
++		local_save_flags(flags);
++
++	if (++die.lock_owner_depth < 3) {
++		int nl = 0;
++		unsigned long esp;
++		unsigned short ss;
++
++		report_bug(regs->eip);
++
++		printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++		printk(KERN_EMERG "PREEMPT ");
++		nl = 1;
 +#endif
-+	oops_in_progress = 0;
-+	/*
-+	 * OK, the message is on the console.  Now we call printk()
-+	 * without oops_in_progress set so that printk will give klogd
-+	 * a poke.  Hold onto your hats...
-+	 */
-+	console_loglevel = 15;		/* NMI oopser may have shut the console up */
-+	printk(" ");
-+	console_loglevel = loglevel_save;
++#ifdef CONFIG_SMP
++		if (!nl)
++			printk(KERN_EMERG);
++		printk("SMP ");
++		nl = 1;
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++		if (!nl)
++			printk(KERN_EMERG);
++		printk("DEBUG_PAGEALLOC");
++		nl = 1;
++#endif
++		if (nl)
++			printk("\n");
++		if (notify_die(DIE_OOPS, str, regs, err,
++					current->thread.trap_no, SIGSEGV) !=
++				NOTIFY_STOP) {
++			show_registers(regs);
++			/* Executive summary in case the oops scrolled away */
++			esp = (unsigned long) (&regs->esp);
++			savesegment(ss, ss);
++			if (user_mode(regs)) {
++				esp = regs->esp;
++				ss = regs->xss & 0xffff;
++			}
++			printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
++			print_symbol("%s", regs->eip);
++			printk(" SS:ESP %04x:%08lx\n", ss, esp);
++		}
++		else
++			regs = NULL;
++  	} else
++		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
++
++	bust_spinlocks(0);
++	die.lock_owner = -1;
++	spin_unlock_irqrestore(&die.lock, flags);
++
++	if (!regs)
++		return;
++
++	if (kexec_should_crash(current))
++		crash_kexec(regs);
++
++	if (in_interrupt())
++		panic("Fatal exception in interrupt");
++
++	if (panic_on_oops)
++		panic("Fatal exception");
++
++	oops_exit();
++	do_exit(SIGSEGV);
 +}
 +
-+/*
-+ * Return EIP plus the CS segment base.  The segment limit is also
-+ * adjusted, clamped to the kernel/user address space (whichever is
-+ * appropriate), and returned in *eip_limit.
-+ *
-+ * The segment is checked, because it might have been changed by another
-+ * task between the original faulting instruction and here.
-+ *
-+ * If CS is no longer a valid code segment, or if EIP is beyond the
-+ * limit, or if it is a kernel address when CS is not a kernel segment,
-+ * then the returned value will be greater than *eip_limit.
-+ * 
-+ * This is slow, but is very rarely executed.
-+ */
-+static inline unsigned long get_segment_eip(struct pt_regs *regs,
-+					    unsigned long *eip_limit)
++static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
 +{
-+	unsigned long eip = regs->eip;
-+	unsigned seg = regs->xcs & 0xffff;
-+	u32 seg_ar, seg_limit, base, *desc;
++	if (!user_mode_vm(regs))
++		die(str, regs, err);
++}
 +
-+	/* Unlikely, but must come before segment checks. */
-+	if (unlikely(regs->eflags & VM_MASK)) {
-+		base = seg << 4;
-+		*eip_limit = base + 0xffff;
-+		return base + (eip & 0xffff);
++static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
++			      struct pt_regs * regs, long error_code,
++			      siginfo_t *info)
++{
++	struct task_struct *tsk = current;
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = trapnr;
++
++	if (regs->eflags & VM_MASK) {
++		if (vm86)
++			goto vm86_trap;
++		goto trap_signal;
 +	}
 +
-+	/* The standard kernel/user address space limit. */
-+	*eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
++	if (!user_mode(regs))
++		goto kernel_trap;
 +
-+	/* By far the most common cases. */
-+	if (likely(seg == __USER_CS || seg == GET_KERNEL_CS()))
-+		return eip;
++	trap_signal: {
++		if (info)
++			force_sig_info(signr, info, tsk);
++		else
++			force_sig(signr, tsk);
++		return;
++	}
 +
-+	/* Check the segment exists, is within the current LDT/GDT size,
-+	   that kernel/user (ring 0..3) has the appropriate privilege,
-+	   that it's a code segment, and get the limit. */
-+	__asm__ ("larl %3,%0; lsll %3,%1"
-+		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
-+	if ((~seg_ar & 0x9800) || eip > seg_limit) {
-+		*eip_limit = 0;
-+		return 1;	 /* So that returned eip > *eip_limit. */
++	kernel_trap: {
++		if (!fixup_exception(regs))
++			die(str, regs, error_code);
++		return;
 +	}
 +
-+	/* Get the GDT/LDT descriptor base. 
-+	   When you look for races in this code remember that
-+	   LDT and other horrors are only used in user space. */
-+	if (seg & (1<<2)) {
-+		/* Must lock the LDT while reading it. */
-+		down(&current->mm->context.sem);
-+		desc = current->mm->context.ldt;
-+		desc = (void *)desc + (seg & ~7);
-+	} else {
-+		/* Must disable preemption while reading the GDT. */
-+ 		desc = (u32 *)get_cpu_gdt_table(get_cpu());
-+		desc = (void *)desc + (seg & ~7);
++	vm86_trap: {
++		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
++		if (ret) goto trap_signal;
++		return;
 +	}
++}
 +
-+	/* Decode the code segment base from the descriptor */
-+	base = get_desc_base((unsigned long *)desc);
++#define DO_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++						== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
++}
 +
-+	if (seg & (1<<2)) { 
-+		up(&current->mm->context.sem);
-+	} else
-+		put_cpu();
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	siginfo_t info; \
++	info.si_signo = signr; \
++	info.si_errno = 0; \
++	info.si_code = sicode; \
++	info.si_addr = (void __user *)siaddr; \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++						== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
++}
 +
-+	/* Adjust EIP and segment limit, and clamp at the kernel limit.
-+	   It's legitimate for segments to wrap at 0xffffffff. */
-+	seg_limit += base;
-+	if (seg_limit < *eip_limit && seg_limit >= base)
-+		*eip_limit = seg_limit;
-+	return eip + base;
++#define DO_VM86_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++						== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
 +}
 +
-+/* 
-+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
-+ * Check that here and ignore it.
-+ */
-+static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
-+{ 
-+	unsigned long limit;
-+	unsigned long instr = get_segment_eip (regs, &limit);
-+	int scan_more = 1;
-+	int prefetch = 0; 
-+	int i;
++#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	siginfo_t info; \
++	info.si_signo = signr; \
++	info.si_errno = 0; \
++	info.si_code = sicode; \
++	info.si_addr = (void __user *)siaddr; \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++						== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
++}
 +
-+	for (i = 0; scan_more && i < 15; i++) { 
-+		unsigned char opcode;
-+		unsigned char instr_hi;
-+		unsigned char instr_lo;
++DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
++#ifndef CONFIG_KPROBES
++DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
++#endif
++DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
++DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
++DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
 +
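To see what these macros generate, the DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) line above expands to the following handler (reindented here for readability):

    fastcall void do_invalid_TSS(struct pt_regs *regs, long error_code)
    {
            if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
                           10, SIGSEGV) == NOTIFY_STOP)
                    return;
            do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
    }

The DO_VM86_ERROR variants differ only in passing vm86 = 1, so do_trap() routes vm86-mode faults through handle_vm86_trap(), and the *_INFO variants additionally fill in a siginfo_t before the call.
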
-+		if (instr > limit)
-+			break;
-+		if (__get_user(opcode, (unsigned char __user *) instr))
-+			break; 
++fastcall void __kprobes do_general_protection(struct pt_regs * regs,
++					      long error_code)
++{
++	current->thread.error_code = error_code;
++	current->thread.trap_no = 13;
 +
-+		instr_hi = opcode & 0xf0; 
-+		instr_lo = opcode & 0x0f; 
-+		instr++;
++	if (regs->eflags & VM_MASK)
++		goto gp_in_vm86;
 +
-+		switch (instr_hi) { 
-+		case 0x20:
-+		case 0x30:
-+			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
-+			scan_more = ((instr_lo & 7) == 0x6);
-+			break;
-+			
-+		case 0x60:
-+			/* 0x64 thru 0x67 are valid prefixes in all modes. */
-+			scan_more = (instr_lo & 0xC) == 0x4;
-+			break;		
-+		case 0xF0:
-+			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
-+			scan_more = !instr_lo || (instr_lo>>1) == 1;
-+			break;			
-+		case 0x00:
-+			/* Prefetch instruction is 0x0F0D or 0x0F18 */
-+			scan_more = 0;
-+			if (instr > limit)
-+				break;
-+			if (__get_user(opcode, (unsigned char __user *) instr))
-+				break;
-+			prefetch = (instr_lo == 0xF) &&
-+				(opcode == 0x0D || opcode == 0x18);
-+			break;			
-+		default:
-+			scan_more = 0;
-+			break;
-+		} 
++	if (!user_mode(regs))
++		goto gp_in_kernel;
++
++	current->thread.error_code = error_code;
++	current->thread.trap_no = 13;
++	force_sig(SIGSEGV, current);
++	return;
++
++gp_in_vm86:
++	local_irq_enable();
++	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
++	return;
++
++gp_in_kernel:
++	if (!fixup_exception(regs)) {
++		if (notify_die(DIE_GPF, "general protection fault", regs,
++				error_code, 13, SIGSEGV) == NOTIFY_STOP)
++			return;
++		die("general protection fault", regs, error_code);
 +	}
-+	return prefetch;
 +}
 +
-+static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
-+			      unsigned long error_code)
++static __kprobes void
++mem_parity_error(unsigned char reason, struct pt_regs * regs)
 +{
-+	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-+		     boot_cpu_data.x86 >= 6)) {
-+		/* Catch an obscure case of prefetch inside an NX page. */
-+		if (nx_enabled && (error_code & 16))
-+			return 0;
-+		return __is_prefetch(regs, addr);
-+	}
-+	return 0;
-+} 
++	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
++		"CPU %d.\n", reason, smp_processor_id());
++	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
++	if (panic_on_unrecovered_nmi)
++		panic("NMI: Not continuing");
 +
-+static noinline void force_sig_info_fault(int si_signo, int si_code,
-+	unsigned long address, struct task_struct *tsk)
-+{
-+	siginfo_t info;
++	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
 +
-+	info.si_signo = si_signo;
-+	info.si_errno = 0;
-+	info.si_code = si_code;
-+	info.si_addr = (void __user *)address;
-+	force_sig_info(si_signo, &info, tsk);
++	/* Clear and disable the memory parity error line. */
++	clear_mem_error(reason);
 +}
 +
-+fastcall void do_invalid_op(struct pt_regs *, unsigned long);
-+
-+#ifdef CONFIG_X86_PAE
-+static void dump_fault_path(unsigned long address)
++static __kprobes void
++io_check_error(unsigned char reason, struct pt_regs * regs)
 +{
-+	unsigned long *p, page;
-+	unsigned long mfn; 
++	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
++	show_registers(regs);
 +
-+	page = read_cr3();
-+	p  = (unsigned long *)__va(page);
-+	p += (address >> 30) * 2;
-+	printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
-+	if (p[0] & 1) {
-+		mfn  = (p[0] >> PAGE_SHIFT) | ((p[1] & 0x7) << 20); 
-+		page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
-+		p  = (unsigned long *)__va(page);
-+		address &= 0x3fffffff;
-+		p += (address >> 21) * 2;
-+		printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n", 
-+		       page, p[1], p[0]);
-+#ifndef CONFIG_HIGHPTE
-+		if (p[0] & 1) {
-+			mfn  = (p[0] >> PAGE_SHIFT) | ((p[1] & 0x7) << 20); 
-+			page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
-+			p  = (unsigned long *) __va(page);
-+			address &= 0x001fffff;
-+			p += (address >> 12) * 2;
-+			printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
-+			       page, p[1], p[0]);
-+		}
-+#endif
-+	}
++	/* Re-enable the IOCK line, wait for a few seconds */
++	clear_io_check_error(reason);
 +}
-+#else
-+static void dump_fault_path(unsigned long address)
-+{
-+	unsigned long page;
 +
-+	page = read_cr3();
-+	page = ((unsigned long *) __va(page))[address >> 22];
-+	if (oops_may_print())
-+		printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
-+		       machine_to_phys(page));
-+	/*
-+	 * We must not directly access the pte in the highpte
-+	 * case, the page table might be allocated in highmem.
-+	 * And lets rather not kmap-atomic the pte, just in case
-+	 * it's allocated already.
-+	 */
-+#ifndef CONFIG_HIGHPTE
-+	if ((page & 1) && oops_may_print()) {
-+		page &= PAGE_MASK;
-+		address &= 0x003ff000;
-+		page = machine_to_phys(page);
-+		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
-+		printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
-+		       machine_to_phys(page));
++static __kprobes void
++unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++{
++#ifdef CONFIG_MCA
++	/* Might actually be able to figure out what the guilty party
++	 * is. */
++	if( MCA_bus ) {
++		mca_handle_nmi();
++		return;
 +	}
 +#endif
-+}
-+#endif
++	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
++		"CPU %d.\n", reason, smp_processor_id());
++	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
++	if (panic_on_unrecovered_nmi)
++		panic("NMI: Not continuing");
 +
-+static int spurious_fault(struct pt_regs *regs,
-+			  unsigned long address,
-+			  unsigned long error_code)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
++	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
++}
 +
-+#ifdef CONFIG_XEN
-+	/* Faults in hypervisor area are never spurious. */
-+	if (address >= HYPERVISOR_VIRT_START)
-+		return 0;
-+#endif
++static DEFINE_SPINLOCK(nmi_print_lock);
 +
-+	/* Reserved-bit violation or user access to kernel space? */
-+	if (error_code & 0x0c)
-+		return 0;
++void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
++{
++	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
++	    NOTIFY_STOP)
++		return;
 +
-+	pgd = init_mm.pgd + pgd_index(address);
-+	if (!pgd_present(*pgd))
-+		return 0;
++	spin_lock(&nmi_print_lock);
++	/*
++	 * We are in trouble anyway, let's at least try
++	 * to get a message out.
++	 */
++	bust_spinlocks(1);
++	printk(KERN_EMERG "%s", msg);
++	printk(" on CPU%d, eip %08lx, registers:\n",
++		smp_processor_id(), regs->eip);
++	show_registers(regs);
++	console_silent();
++	spin_unlock(&nmi_print_lock);
++	bust_spinlocks(0);
 +
-+	pud = pud_offset(pgd, address);
-+	if (!pud_present(*pud))
-+		return 0;
++	/* If we are in the kernel we are probably nested up pretty badly
++	 * and might as well get out now while we still can.
++	 */
++	if (!user_mode_vm(regs)) {
++		current->thread.trap_no = 2;
++		crash_kexec(regs);
++	}
 +
-+	pmd = pmd_offset(pud, address);
-+	if (!pmd_present(*pmd))
-+		return 0;
++	do_exit(SIGSEGV);
++}
 +
-+	pte = pte_offset_kernel(pmd, address);
-+	if (!pte_present(*pte))
-+		return 0;
-+	if ((error_code & 0x02) && !pte_write(*pte))
-+		return 0;
-+#ifdef CONFIG_X86_PAE
-+	if ((error_code & 0x10) && (pte_val(*pte) & _PAGE_NX))
-+		return 0;
++static __kprobes void default_do_nmi(struct pt_regs * regs)
++{
++	unsigned char reason = 0;
++
++	/* Only the BSP gets external NMIs from the system.  */
++	if (!smp_processor_id())
++		reason = get_nmi_reason();
++ 
++	if (!(reason & 0xc0)) {
++		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
++							== NOTIFY_STOP)
++			return;
++#ifdef CONFIG_X86_LOCAL_APIC
++		/*
++		 * Ok, so this is none of the documented NMI sources,
++		 * so it must be the NMI watchdog.
++		 */
++		if (nmi_watchdog_tick(regs, reason))
++			return;
++		if (!do_nmi_callback(regs, smp_processor_id()))
 +#endif
++			unknown_nmi_error(reason, regs);
 +
-+	return 1;
++		return;
++	}
++	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
++		return;
++	if (reason & 0x80)
++		mem_parity_error(reason, regs);
++	if (reason & 0x40)
++		io_check_error(reason, regs);
++	/*
++	 * Reassert NMI in case it became active meanwhile
++	 * as it's edge-triggered.
++	 */
++	reassert_nmi();
 +}
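
The reason byte tested above comes from get_nmi_reason(); on PC-compatible hardware that is, to my understanding, a read of system control port B (I/O port 0x61), whose top two bits report the only architecturally defined NMI sources. That is where the 0x80, 0x40 and 0xc0 masks come from; an illustrative (not the actual) reading:

    #define NMI_REASON_PARITY   0x80    /* bit 7: memory parity error */
    #define NMI_REASON_IOCHK    0x40    /* bit 6: I/O channel check */

    unsigned char reason = inb(0x61);   /* hidden behind get_nmi_reason() */

    if (!(reason & (NMI_REASON_PARITY | NMI_REASON_IOCHK)))
            /* no known hardware source: NMI watchdog or unknown NMI */;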
 +
-+static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
++fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
 +{
-+	unsigned index = pgd_index(address);
-+	pgd_t *pgd_k;
-+	pud_t *pud, *pud_k;
-+	pmd_t *pmd, *pmd_k;
++	int cpu;
 +
-+	pgd += index;
-+	pgd_k = init_mm.pgd + index;
++	nmi_enter();
 +
-+	if (!pgd_present(*pgd_k))
-+		return NULL;
++	cpu = smp_processor_id();
 +
-+	/*
-+	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
-+	 * and redundant with the set_pmd() on non-PAE. As would
-+	 * set_pud.
-+	 */
++	++nmi_count(cpu);
 +
-+	pud = pud_offset(pgd, address);
-+	pud_k = pud_offset(pgd_k, address);
-+	if (!pud_present(*pud_k))
-+		return NULL;
++	default_do_nmi(regs);
 +
-+	pmd = pmd_offset(pud, address);
-+	pmd_k = pmd_offset(pud_k, address);
-+	if (!pmd_present(*pmd_k))
-+		return NULL;
-+	if (!pmd_present(*pmd))
-+#ifndef CONFIG_XEN
-+		set_pmd(pmd, *pmd_k);
-+#else
-+		/*
-+		 * When running on Xen we must launder *pmd_k through
-+		 * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
-+		 */
-+		set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
-+#endif
-+	else
-+		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
-+	return pmd_k;
++	nmi_exit();
 +}
 +
-+/*
-+ * Handle a fault on the vmalloc or module mapping area
-+ *
-+ * This assumes no large pages in there.
-+ */
-+static inline int vmalloc_fault(unsigned long address)
++#ifdef CONFIG_KPROBES
++fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
 +{
-+	unsigned long pgd_paddr;
-+	pmd_t *pmd_k;
-+	pte_t *pte_k;
-+	/*
-+	 * Synchronize this task's top level page-table
-+	 * with the 'reference' page table.
-+	 *
-+	 * Do _not_ use "current" here. We might be inside
-+	 * an interrupt in the middle of a task switch..
-+	 */
-+	pgd_paddr = read_cr3();
-+	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
-+	if (!pmd_k)
-+		return -1;
-+	pte_k = pte_offset_kernel(pmd_k, address);
-+	if (!pte_present(*pte_k))
-+		return -1;
-+	return 0;
++	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
++			== NOTIFY_STOP)
++		return;
++	/* This is an interrupt gate, because kprobes wants interrupts
++	 * disabled.  Normal trap handlers don't. */
++	restore_interrupts(regs);
++	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
 +}
++#endif
 +
 +/*
-+ * This routine handles page faults.  It determines the address,
-+ * and the problem, and then passes it off to one of the appropriate
-+ * routines.
++ * Our handling of the processor debug registers is non-trivial.
++ * We do not clear them on entry and exit from the kernel. Therefore
++ * it is possible to get a watchpoint trap here from inside the kernel.
++ * However, the code in ./ptrace.c has ensured that the user can
++ * only set watchpoints on userspace addresses. Therefore the in-kernel
++ * watchpoint trap can only occur in code which is reading/writing
++ * from user space. Such code must not hold kernel locks (since it
++ * can equally take a page fault), therefore it is safe to call
++ * force_sig_info even though that claims and releases locks.
++ * 
++ * Code in ./signal.c ensures that the debug control register
++ * is restored before we deliver any signal, and therefore that
++ * user code runs with the correct debug control register even though
++ * we clear it here.
 + *
-+ * error_code:
-+ *	bit 0 == 0 means no page found, 1 means protection fault
-+ *	bit 1 == 0 means read, 1 means write
-+ *	bit 2 == 0 means kernel, 1 means user-mode
-+ *	bit 3 == 1 means use of reserved bit detected
-+ *	bit 4 == 1 means fault was an instruction fetch
++ * Being careful here means that we don't have to be as careful in a
++ * lot of more complicated places (task switching can be a bit lazy
++ * about restoring all the debug state, and ptrace doesn't have to
++ * find every occurrence of the TF bit that could be saved away even
++ * by user code)
 + */
-+fastcall void __kprobes do_page_fault(struct pt_regs *regs,
-+				      unsigned long error_code)
++fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
 +{
-+	struct task_struct *tsk;
-+	struct mm_struct *mm;
-+	struct vm_area_struct * vma;
-+	unsigned long address;
-+	int write, si_code;
++	unsigned int condition;
++	struct task_struct *tsk = current;
 +
-+	/* get the address */
-+        address = read_cr2();
++	get_debugreg(condition, 6);
 +
-+	/* Set the "privileged fault" bit to something sane. */
-+	error_code &= ~4;
-+	error_code |= (regs->xcs & 2) << 1;
-+	if (regs->eflags & X86_EFLAGS_VM)
-+		error_code |= 4;
++	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++					SIGTRAP) == NOTIFY_STOP)
++		return;
++	/* It's safe to allow irq's after DR6 has been saved */
++	if (regs->eflags & X86_EFLAGS_IF)
++		local_irq_enable();
 +
-+	tsk = current;
++	/* Mask out spurious debug traps due to lazy DR7 setting */
++	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++		if (!tsk->thread.debugreg[7])
++			goto clear_dr7;
++	}
 +
-+	si_code = SEGV_MAPERR;
++	if (regs->eflags & VM_MASK)
++		goto debug_vm86;
++
++	/* Save debug status register where ptrace can see it */
++	tsk->thread.debugreg[6] = condition;
 +
 +	/*
-+	 * We fault-in kernel-space virtual memory on-demand. The
-+	 * 'reference' page table is init_mm.pgd.
-+	 *
-+	 * NOTE! We MUST NOT take any locks for this case. We may
-+	 * be in an interrupt or a critical region, and should
-+	 * only copy the information from the master page table,
-+	 * nothing more.
-+	 *
-+	 * This verifies that the fault happens in kernel space
-+	 * (error_code & 4) == 0, and that the fault was not a
-+	 * protection error (error_code & 9) == 0.
++	 * Single-stepping through TF: make sure we ignore any events in
++	 * kernel space (but re-enable TF when returning to user mode).
 +	 */
-+	if (unlikely(address >= TASK_SIZE)) {
-+#ifdef CONFIG_XEN
-+		/* Faults in hypervisor area can never be patched up. */
-+		if (address >= HYPERVISOR_VIRT_START)
-+			goto bad_area_nosemaphore;
-+#endif
-+		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
-+			return;
-+		/* Can take a spurious fault if mapping changes R/O -> R/W. */
-+		if (spurious_fault(regs, address, error_code))
-+			return;
-+		if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-+						SIGSEGV) == NOTIFY_STOP)
-+			return;
++	if (condition & DR_STEP) {
 +		/*
-+		 * Don't take the mm semaphore here. If we fixup a prefetch
-+		 * fault we could otherwise deadlock.
++		 * We already checked v86 mode above, so we can
++		 * check for kernel mode by just checking the CPL
++		 * of CS.
 +		 */
-+		goto bad_area_nosemaphore;
++		if (!user_mode(regs))
++			goto clear_TF_reenable;
 +	}
 +
-+	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-+					SIGSEGV) == NOTIFY_STOP)
-+		return;
-+
-+	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
-+	   fault has been handled. */
-+	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
-+		local_irq_enable();
-+
-+	mm = tsk->mm;
++	/* Ok, finally something we can handle */
++	send_sigtrap(tsk, regs, error_code);
 +
-+	/*
-+	 * If we're in an interrupt, have no user context or are running in an
-+	 * atomic region then we must not take the fault..
++	/* Disable additional traps. They'll be re-enabled when
++	 * the signal is delivered.
 +	 */
-+	if (in_atomic() || !mm)
-+		goto bad_area_nosemaphore;
++clear_dr7:
++	set_debugreg(0, 7);
++	return;
 +
-+	/* When running in the kernel we expect faults to occur only to
-+	 * addresses in user space.  All other faults represent errors in the
-+	 * kernel and should generate an OOPS.  Unfortunatly, in the case of an
-+	 * erroneous fault occurring in a code path which already holds mmap_sem
-+	 * we will deadlock attempting to validate the fault against the
-+	 * address space.  Luckily the kernel only validly references user
-+	 * space from well defined areas of code, which are listed in the
-+	 * exceptions table.
-+	 *
-+	 * As the vast majority of faults will be valid we will only perform
-+	 * the source reference check when there is a possibilty of a deadlock.
-+	 * Attempt to lock the address space, if we cannot we then validate the
-+	 * source.  If this is invalid we can skip the address space check,
-+	 * thus avoiding the deadlock.
-+	 */
-+	if (!down_read_trylock(&mm->mmap_sem)) {
-+		if ((error_code & 4) == 0 &&
-+		    !search_exception_tables(regs->eip))
-+			goto bad_area_nosemaphore;
-+		down_read(&mm->mmap_sem);
-+	}
++debug_vm86:
++	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
++	return;
++
++clear_TF_reenable:
++	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++	regs->eflags &= ~TF_MASK;
++	return;
++}
 +
-+	vma = find_vma(mm, address);
-+	if (!vma)
-+		goto bad_area;
-+	if (vma->vm_start <= address)
-+		goto good_area;
-+	if (!(vma->vm_flags & VM_GROWSDOWN))
-+		goto bad_area;
-+	if (error_code & 4) {
-+		/*
-+		 * Accessing the stack below %esp is always a bug.
-+		 * The large cushion allows instructions like enter
-+		 * and pusha to work.  ("enter $65535,$31" pushes
-+		 * 32 pointers and then decrements %esp by 65535.)
-+		 */
-+		if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
-+			goto bad_area;
-+	}
-+	if (expand_stack(vma, address))
-+		goto bad_area;
 +/*
-+ * Ok, we have a good vm_area for this memory access, so
-+ * we can handle it..
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of the asynchronous
++ * IRQ13 behaviour
 + */
-+good_area:
-+	si_code = SEGV_ACCERR;
-+	write = 0;
-+	switch (error_code & 3) {
-+		default:	/* 3: write, present */
-+#ifdef TEST_VERIFY_AREA
-+			if (regs->cs == GET_KERNEL_CS())
-+				printk("WP fault at %08lx\n", regs->eip);
-+#endif
-+			/* fall through */
-+		case 2:		/* write, not present */
-+			if (!(vma->vm_flags & VM_WRITE))
-+				goto bad_area;
-+			write++;
-+			break;
-+		case 1:		/* read, present */
-+			goto bad_area;
-+		case 0:		/* read, not present */
-+			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-+				goto bad_area;
-+	}
++void math_error(void __user *eip)
++{
++	struct task_struct * task;
++	siginfo_t info;
++	unsigned short cwd, swd;
 +
-+ survive:
 +	/*
-+	 * If for any reason at all we couldn't handle the fault,
-+	 * make sure we exit gracefully rather than endlessly redo
-+	 * the fault.
++	 * Save the info for the exception handler and clear the error.
 +	 */
-+	switch (handle_mm_fault(mm, vma, address, write)) {
-+		case VM_FAULT_MINOR:
-+			tsk->min_flt++;
-+			break;
-+		case VM_FAULT_MAJOR:
-+			tsk->maj_flt++;
-+			break;
-+		case VM_FAULT_SIGBUS:
-+			goto do_sigbus;
-+		case VM_FAULT_OOM:
-+			goto out_of_memory;
-+		default:
-+			BUG();
-+	}
-+
++	task = current;
++	save_init_fpu(task);
++	task->thread.trap_no = 16;
++	task->thread.error_code = 0;
++	info.si_signo = SIGFPE;
++	info.si_errno = 0;
++	info.si_code = __SI_FAULT;
++	info.si_addr = eip;
 +	/*
-+	 * Did it hit the DOS screen memory VA from vm86 mode?
++	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
++	 * status.  0x3f is the exception bits in these regs, 0x200 is the
++	 * C1 reg you need in case of a stack fault, 0x040 is the stack
++	 * fault bit.  We should only be taking one exception at a time,
++	 * so if this combination doesn't produce any single exception,
++	 * then we have a bad program that isn't synchronizing its FPU usage
++	 * and it will suffer the consequences since we won't be able to
++	 * fully reproduce the context of the exception
 +	 */
-+	if (regs->eflags & VM_MASK) {
-+		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
-+		if (bit < 32)
-+			tsk->thread.screen_bitmap |= 1 << bit;
++	cwd = get_fpu_cwd(task);
++	swd = get_fpu_swd(task);
++	switch (swd & ~cwd & 0x3f) {
++		case 0x000: /* No unmasked exception */
++			return;
++		default:    /* Multiple exceptions */
++			break;
++		case 0x001: /* Invalid Op */
++			/*
++			 * swd & 0x240 == 0x040: Stack Underflow
++			 * swd & 0x240 == 0x240: Stack Overflow
++			 * User must clear the SF bit (0x40) if set
++			 */
++			info.si_code = FPE_FLTINV;
++			break;
++		case 0x002: /* Denormalize */
++		case 0x010: /* Underflow */
++			info.si_code = FPE_FLTUND;
++			break;
++		case 0x004: /* Zero Divide */
++			info.si_code = FPE_FLTDIV;
++			break;
++		case 0x008: /* Overflow */
++			info.si_code = FPE_FLTOVF;
++			break;
++		case 0x020: /* Precision */
++			info.si_code = FPE_FLTRES;
++			break;
 +	}
-+	up_read(&mm->mmap_sem);
-+	return;
-+
-+/*
-+ * Something tried to access memory that isn't in our memory map..
-+ * Fix it, but check if it's kernel or user first..
-+ */
-+bad_area:
-+	up_read(&mm->mmap_sem);
++	force_sig_info(SIGFPE, &info, task);
++}
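
A worked instance of the masking described in the comment above, with illustrative values: the x87 control word defaults to 0x037f, masking all six exception bits. Suppose a program unmasks only zero-divide (clears bit 2) and then divides by zero, so the FPU latches ZE (bit 2) in the status word:

    cwd = 0x037b;            /* default 0x037f with bit 2 cleared */
    swd = 0x0004;            /* ZE set (other status bits elided) */
    /* swd & ~cwd & 0x3f == 0x004, so the switch selects FPE_FLTDIV */

simd_math_error() below performs the analogous computation on the single MXCSR register, whose mask bits live at 0x1f80 and are shifted right by 7 to line up with the flag bits at 0x3f.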
 +
-+bad_area_nosemaphore:
-+	/* User mode accesses just cause a SIGSEGV */
-+	if (error_code & 4) {
-+		/* 
-+		 * Valid to do another page fault here because this one came 
-+		 * from user space.
-+		 */
-+		if (is_prefetch(regs, address, error_code))
-+			return;
++fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
++{
++	ignore_fpu_irq = 1;
++	math_error((void __user *)regs->eip);
++}
 +
-+		tsk->thread.cr2 = address;
-+		/* Kernel addresses are always protection faults */
-+		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-+		tsk->thread.trap_no = 14;
-+		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
-+		return;
-+	}
++static void simd_math_error(void __user *eip)
++{
++	struct task_struct * task;
++	siginfo_t info;
++	unsigned short mxcsr;
 +
-+#ifdef CONFIG_X86_F00F_BUG
 +	/*
-+	 * Pentium F0 0F C7 C8 bug workaround.
++	 * Save the info for the exception handler and clear the error.
 +	 */
-+	if (boot_cpu_data.f00f_bug) {
-+		unsigned long nr;
-+		
-+		nr = (address - idt_descr.address) >> 3;
-+
-+		if (nr == 6) {
-+			do_invalid_op(regs, 0);
-+			return;
-+		}
-+	}
-+#endif
-+
-+no_context:
-+	/* Are we prepared to handle this kernel fault?  */
-+	if (fixup_exception(regs))
-+		return;
-+
-+	/* 
-+	 * Valid to do another page fault here, because if this fault
-+	 * had been triggered by is_prefetch fixup_exception would have 
-+	 * handled it.
++	task = current;
++	save_init_fpu(task);
++	task->thread.trap_no = 19;
++	task->thread.error_code = 0;
++	info.si_signo = SIGFPE;
++	info.si_errno = 0;
++	info.si_code = __SI_FAULT;
++	info.si_addr = eip;
++	/*
++	 * The SIMD FPU exceptions are handled a little differently, as there
++	 * is only a single status/control register.  Thus, to determine which
++	 * unmasked exception was caught we must mask the exception mask bits
++	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
 +	 */
-+ 	if (is_prefetch(regs, address, error_code))
-+ 		return;
-+
-+/*
-+ * Oops. The kernel tried to access some bad page. We'll have to
-+ * terminate things with extreme prejudice.
-+ */
-+
-+	bust_spinlocks(1);
-+
-+	if (oops_may_print()) {
-+	#ifdef CONFIG_X86_PAE
-+		if (error_code & 16) {
-+			pte_t *pte = lookup_address(address);
-+
-+			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
-+				printk(KERN_CRIT "kernel tried to execute "
-+					"NX-protected page - exploit attempt? "
-+					"(uid: %d)\n", current->uid);
-+		}
-+	#endif
-+		if (address < PAGE_SIZE)
-+			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
-+					"pointer dereference");
-+		else
-+			printk(KERN_ALERT "BUG: unable to handle kernel paging"
-+					" request");
-+		printk(" at virtual address %08lx\n",address);
-+		printk(KERN_ALERT " printing eip:\n");
-+		printk("%08lx\n", regs->eip);
-+		dump_fault_path(address);
-+	}
-+	tsk->thread.cr2 = address;
-+	tsk->thread.trap_no = 14;
-+	tsk->thread.error_code = error_code;
-+	die("Oops", regs, error_code);
-+	bust_spinlocks(0);
-+	do_exit(SIGKILL);
-+
-+/*
-+ * We ran out of memory, or some other thing happened to us that made
-+ * us unable to handle the page fault gracefully.
-+ */
-+out_of_memory:
-+	up_read(&mm->mmap_sem);
-+	if (tsk->pid == 1) {
-+		yield();
-+		down_read(&mm->mmap_sem);
-+		goto survive;
++	mxcsr = get_fpu_mxcsr(task);
++	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++		case 0x000:
++		default:
++			break;
++		case 0x001: /* Invalid Op */
++			info.si_code = FPE_FLTINV;
++			break;
++		case 0x002: /* Denormalize */
++		case 0x010: /* Underflow */
++			info.si_code = FPE_FLTUND;
++			break;
++		case 0x004: /* Zero Divide */
++			info.si_code = FPE_FLTDIV;
++			break;
++		case 0x008: /* Overflow */
++			info.si_code = FPE_FLTOVF;
++			break;
++		case 0x020: /* Precision */
++			info.si_code = FPE_FLTRES;
++			break;
 +	}
-+	printk("VM: killing process %s\n", tsk->comm);
-+	if (error_code & 4)
-+		do_exit(SIGKILL);
-+	goto no_context;
-+
-+do_sigbus:
-+	up_read(&mm->mmap_sem);
-+
-+	/* Kernel mode? Handle exceptions or die */
-+	if (!(error_code & 4))
-+		goto no_context;
-+
-+	/* User space => ok to do another page fault */
-+	if (is_prefetch(regs, address, error_code))
-+		return;
-+
-+	tsk->thread.cr2 = address;
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = 14;
-+	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
++	force_sig_info(SIGFPE, &info, task);
 +}
 +
-+#ifndef CONFIG_X86_PAE
-+void vmalloc_sync_all(void)
++fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
++					  long error_code)
 +{
-+	/*
-+	 * Note that races in the updates of insync and start aren't
-+	 * problematic: insync can only get set bits added, and updates to
-+	 * start are only improving performance (without affecting correctness
-+	 * if undone).
-+	 */
-+	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-+	static unsigned long start = TASK_SIZE;
-+	unsigned long address;
-+
-+	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-+	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-+		if (!test_bit(pgd_index(address), insync)) {
-+			unsigned long flags;
-+			struct page *page;
-+
-+			spin_lock_irqsave(&pgd_lock, flags);
-+			for (page = pgd_list; page; page =
-+					(struct page *)page->index)
-+				if (!vmalloc_sync_one(page_address(page),
-+								address)) {
-+					BUG_ON(page != pgd_list);
-+					break;
-+				}
-+			spin_unlock_irqrestore(&pgd_lock, flags);
-+			if (!page)
-+				set_bit(pgd_index(address), insync);
++	if (cpu_has_xmm) {
++		/* Handle SIMD FPU exceptions on PIII+ processors. */
++		ignore_fpu_irq = 1;
++		simd_math_error((void __user *)regs->eip);
++	} else {
++		/*
++		 * Handle strange cache flush from user space exception
++		 * in all other cases.  This is undocumented behaviour.
++		 */
++		if (regs->eflags & VM_MASK) {
++			handle_vm86_fault((struct kernel_vm86_regs *)regs,
++					  error_code);
++			return;
 +		}
-+		if (address == start && test_bit(pgd_index(address), insync))
-+			start = address + PGDIR_SIZE;
++		current->thread.trap_no = 19;
++		current->thread.error_code = error_code;
++		die_if_kernel("cache flush denied", regs, error_code);
++		force_sig(SIGSEGV, current);
 +	}
 +}
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/mm/highmem-xen.c linux-2.6.18-xen/arch/i386/mm/highmem-xen.c
---- linux-2.6.18.3/arch/i386/mm/highmem-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/mm/highmem-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,133 @@
-+#include <linux/highmem.h>
-+#include <linux/module.h>
 +
-+void *kmap(struct page *page)
++#ifndef CONFIG_XEN
++fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
++					  long error_code)
 +{
-+	might_sleep();
-+	if (!PageHighMem(page))
-+		return page_address(page);
-+	return kmap_high(page);
++#if 0
++	/* No need to warn about this any longer. */
++	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
++#endif
 +}
 +
-+void kunmap(struct page *page)
++fastcall unsigned long patch_espfix_desc(unsigned long uesp,
++					  unsigned long kesp)
 +{
-+	if (in_interrupt())
-+		BUG();
-+	if (!PageHighMem(page))
-+		return;
-+	kunmap_high(page);
++	int cpu = smp_processor_id();
++	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++	struct desc_struct *gdt = (struct desc_struct *)cpu_gdt_descr->address;
++	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
++	unsigned long new_kesp = kesp - base;
++	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
++	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
++	/* Set up base for espfix segment */
++ 	desc &= 0x00f0ff0000000000ULL;
++ 	desc |=	((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
++		((((__u64)base) << 32) & 0xff00000000000000ULL) |
++		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
++		(lim_pages & 0xffff);
++	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
++	return new_kesp;
 +}
++#endif
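
The mask arithmetic in patch_espfix_desc() follows the i386 segment-descriptor layout, which scatters the 32-bit base and 20-bit limit across the 8-byte GDT entry. An annotated reading (my annotation, not part of the patch):

    /* bits of the 64-bit descriptor:
     *   0..15   limit[15:0]   <- lim_pages & 0xffff
     *  16..39   base[23:0]    <- (base << 16) & 0x000000ffffff0000
     *  48..51   limit[19:16]  <- (lim_pages << 32) & 0x000f000000000000
     *  56..63   base[31:24]   <- (base << 32) & 0xff00000000000000
     *  40..47 (access byte) and 52..55 (flags nibble) are kept from the
     *  existing entry by the 0x00f0ff0000000000 mask.
     */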
 +
 +/*
-+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
-+ * no global lock is needed and because the kmap code must perform a global TLB
-+ * invalidation when the kmap pool wraps.
++ *  'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
 + *
-+ * However when holding an atomic kmap is is not legal to sleep, so atomic
-+ * kmaps are appropriate for short, tight code paths only.
++ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ *
++ * Must be called with kernel preemption disabled (in this case,
++ * local interrupts are disabled at the call-site in entry.S).
 + */
-+static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
++asmlinkage void math_state_restore(void)
 +{
-+	enum fixed_addresses idx;
-+	unsigned long vaddr;
-+
-+	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+	inc_preempt_count();
-+	if (!PageHighMem(page))
-+		return page_address(page);
-+
-+	idx = type + KM_TYPE_NR*smp_processor_id();
-+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-+#ifdef CONFIG_DEBUG_HIGHMEM
-+	if (!pte_none(*(kmap_pte-idx)))
-+		BUG();
-+#endif
-+	set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
++	struct thread_info *thread = current_thread_info();
++	struct task_struct *tsk = thread->task;
 +
-+	return (void*) vaddr;
++	/* NB. 'clts' is done for us by Xen during virtual trap. */
++	if (!tsk_used_math(tsk))
++		init_fpu(tsk);
++	restore_fpu(tsk);
++	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
++	tsk->fpu_counter++;
 +}
 +
-+void *kmap_atomic(struct page *page, enum km_type type)
-+{
-+	return __kmap_atomic(page, type, kmap_prot);
-+}
++#ifndef CONFIG_MATH_EMULATION
 +
-+/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
-+void *kmap_atomic_pte(struct page *page, enum km_type type)
++asmlinkage void math_emulate(long arg)
 +{
-+	return __kmap_atomic(page, type, PAGE_KERNEL_RO);
++	printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
++	printk(KERN_EMERG "killing %s.\n", current->comm);
++	force_sig(SIGFPE,current);
++	schedule();
 +}
 +
-+void kunmap_atomic(void *kvaddr, enum km_type type)
-+{
-+#if defined(CONFIG_DEBUG_HIGHMEM) || defined(CONFIG_XEN)
-+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-+	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-+
-+	if (vaddr < FIXADDR_START) { // FIXME
-+		dec_preempt_count();
-+		preempt_check_resched();
-+		return;
-+	}
-+#endif
++#endif /* CONFIG_MATH_EMULATION */
 +
-+#if defined(CONFIG_DEBUG_HIGHMEM)
-+	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-+		BUG();
++#ifdef CONFIG_X86_F00F_BUG
++void __init trap_init_f00f_bug(void)
++{
++	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
 +
 +	/*
-+	 * force other mappings to Oops if they'll try to access
-+	 * this pte without first remap it
-+	 */
-+	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-+	__flush_tlb_one(vaddr);
-+#elif defined(CONFIG_XEN)
-+	/*
-+	 * We must ensure there are no dangling pagetable references when
-+	 * returning memory to Xen (decrease_reservation).
-+	 * XXX TODO: We could make this faster by only zapping when
-+	 * kmap_flush_unused is called but that is trickier and more invasive.
++	 * Update the IDT descriptor and reload the IDT so that
++	 * it uses the read-only mapped virtual address.
 +	 */
-+	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-+#endif
-+
-+	dec_preempt_count();
-+	preempt_check_resched();
-+}
-+
-+/* This is the same as kmap_atomic() but can map memory that doesn't
-+ * have a struct page associated with it.
-+ */
-+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-+{
-+	enum fixed_addresses idx;
-+	unsigned long vaddr;
-+
-+	inc_preempt_count();
-+
-+	idx = type + KM_TYPE_NR*smp_processor_id();
-+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-+	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
-+	__flush_tlb_one(vaddr);
-+
-+	return (void*) vaddr;
++	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++	load_idt(&idt_descr);
 +}
++#endif
 +
-+struct page *kmap_atomic_to_page(void *ptr)
-+{
-+	unsigned long idx, vaddr = (unsigned long)ptr;
-+	pte_t *pte;
-+
-+	if (vaddr < FIXADDR_START)
-+		return virt_to_page(ptr);
-+
-+	idx = virt_to_fix(vaddr);
-+	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
-+	return pte_page(*pte);
-+}
 +
-+EXPORT_SYMBOL(kmap);
-+EXPORT_SYMBOL(kunmap);
-+EXPORT_SYMBOL(kmap_atomic);
-+EXPORT_SYMBOL(kunmap_atomic);
-+EXPORT_SYMBOL(kmap_atomic_to_page);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/mm/hypervisor.c linux-2.6.18-xen/arch/i386/mm/hypervisor.c
---- linux-2.6.18.3/arch/i386/mm/hypervisor.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/mm/hypervisor.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,457 @@
-+/******************************************************************************
-+ * mm/hypervisor.c
-+ * 
-+ * Update page tables via the hypervisor.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
++/*
++ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
++ * for those that specify <dpl>|4 in the second field.
 + */
-+
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/vmalloc.h>
-+#include <asm/page.h>
-+#include <asm/pgtable.h>
-+#include <asm/hypervisor.h>
-+#include <xen/balloon.h>
-+#include <xen/features.h>
-+#include <xen/interface/memory.h>
-+#include <linux/module.h>
-+#include <linux/percpu.h>
-+#include <asm/tlbflush.h>
-+
-+#ifdef CONFIG_X86_64
-+#define pmd_val_ma(v) (v).pmd
-+#else
-+#ifdef CONFIG_X86_PAE
-+# define pmd_val_ma(v) ((v).pmd)
-+# define pud_val_ma(v) ((v).pgd.pgd)
-+#else
-+# define pmd_val_ma(v) ((v).pud.pgd.pgd)
-+#endif
++static trap_info_t trap_table[] = {
++	{  0, 0, __KERNEL_CS, (unsigned long)divide_error		},
++	{  1, 0|4, __KERNEL_CS, (unsigned long)debug			},
++	{  3, 3|4, __KERNEL_CS, (unsigned long)int3			},
++	{  4, 3, __KERNEL_CS, (unsigned long)overflow			},
++	{  5, 0, __KERNEL_CS, (unsigned long)bounds			},
++	{  6, 0, __KERNEL_CS, (unsigned long)invalid_op			},
++	{  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available	},
++	{  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
++	{ 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS		},
++	{ 11, 0, __KERNEL_CS, (unsigned long)segment_not_present	},
++	{ 12, 0, __KERNEL_CS, (unsigned long)stack_segment		},
++	{ 13, 0, __KERNEL_CS, (unsigned long)general_protection		},
++	{ 14, 0|4, __KERNEL_CS, (unsigned long)page_fault		},
++	{ 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment		},
++	{ 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error		},
++	{ 17, 0, __KERNEL_CS, (unsigned long)alignment_check		},
++#ifdef CONFIG_X86_MCE
++	{ 18, 0, __KERNEL_CS, (unsigned long)machine_check		},
 +#endif
++	{ 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error	},
++	{ SYSCALL_VECTOR,  3, __KERNEL_CS, (unsigned long)system_call	},
++	{  0, 0,	   0, 0						}
++};
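
As the comment above says, the second field of each entry carries the gate's DPL in its low two bits, while bit 2 (the "|4") asks Xen to enter the handler with event delivery still masked, the analogue of an interrupt gate. For example:

    {  1, 0|4, __KERNEL_CS, (unsigned long)debug    },  /* DPL 0, events masked */
    {  4, 3,   __KERNEL_CS, (unsigned long)overflow },  /* DPL 3 (userspace may
                                                           raise it via INTO),
                                                           plain trap gate */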
 +
-+void xen_l1_entry_update(pte_t *ptr, pte_t val)
++void __init trap_init(void)
 +{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = pte_val_ma(val);
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
++	HYPERVISOR_set_trap_table(trap_table);
 +
-+void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
-+{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = pmd_val_ma(val);
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
++	if (cpu_has_fxsr) {
++		/*
++		 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
++		 * Generates a compile-time "error: zero width for bit-field" if
++		 * the alignment is wrong.
++		 */
++		struct fxsrAlignAssert {
++			int _:!(offsetof(struct task_struct,
++					thread.i387.fxsave) & 15);
++		};
 +
-+#ifdef CONFIG_X86_PAE
-+void xen_l3_entry_update(pud_t *ptr, pud_t val)
-+{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = pud_val_ma(val);
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+#endif
++		printk(KERN_INFO "Enabling fast FPU save and restore... ");
++		set_in_cr4(X86_CR4_OSFXSR);
++		printk("done.\n");
++	}
++	if (cpu_has_xmm) {
++		printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
++				"support... ");
++		set_in_cr4(X86_CR4_OSXMMEXCPT);
++		printk("done.\n");
++	}
 +
-+#ifdef CONFIG_X86_64
-+void xen_l3_entry_update(pud_t *ptr, pud_t val)
-+{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = val.pud;
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++	/*
++	 * Should be a barrier for any external CPU state.
++	 */
++	cpu_init();
 +}
 +
-+void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
++void smp_trap_init(trap_info_t *trap_ctxt)
 +{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = val.pgd;
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+#endif /* CONFIG_X86_64 */
++	trap_info_t *t = trap_table;
 +
-+void xen_machphys_update(unsigned long mfn, unsigned long pfn)
-+{
-+	mmu_update_t u;
-+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+		BUG_ON(pfn != mfn);
-+		return;
++	for (t = trap_table; t->address; t++) {
++		trap_ctxt[t->vector].flags = t->flags;
++		trap_ctxt[t->vector].cs = t->cs;
++		trap_ctxt[t->vector].address = t->address;
 +	}
-+	u.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-+	u.val = pfn;
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 +}
 +
-+void xen_pt_switch(unsigned long ptr)
++static int __init kstack_setup(char *s)
 +{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_NEW_BASEPTR;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
++	return 1;
 +}
++__setup("kstack=", kstack_setup);
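kstack_setup() registers a boot parameter that sets how many words of stack the oops dumper prints. Because simple_strtoul() is called with base 0, decimal and hexadecimal both work on the kernel command line, e.g.:

    kstack=64        (or, equivalently, kstack=0x40)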
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
+--- a/arch/i386/kernel/tsc.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/tsc.c	2007-03-14 10:55:14.000000000 +0100
+@@ -100,6 +100,7 @@
+ 	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+ }
+ 
++#ifndef CONFIG_XEN
+ /*
+  * Scheduler clock - returns current time in nanosec units.
+  */
+@@ -123,6 +124,7 @@
+ 	/* return the value in ns */
+ 	return cycles_2_ns(this_offset);
+ }
++#endif
+ 
+ static unsigned long calculate_cpu_khz(void)
+ {
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
+--- a/arch/i386/kernel/vm86.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/vm86.c	2007-03-14 10:55:14.000000000 +0100
+@@ -125,7 +125,9 @@
+ struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
+ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ 	struct tss_struct *tss;
++#endif
+ 	struct pt_regs *ret;
+ 	unsigned long tmp;
+ 
+@@ -148,12 +150,16 @@
+ 		do_exit(SIGSEGV);
+ 	}
+ 
++#ifndef CONFIG_X86_NO_TSS
+ 	tss = &per_cpu(init_tss, get_cpu());
++#endif
+ 	current->thread.esp0 = current->thread.saved_esp0;
+ 	current->thread.sysenter_cs = __KERNEL_CS;
+ 	load_esp0(tss, &current->thread);
+ 	current->thread.saved_esp0 = 0;
++#ifndef CONFIG_X86_NO_TSS
+ 	put_cpu();
++#endif
+ 
+ 	ret = KVM86->regs32;
+ 
+@@ -279,7 +285,9 @@
+ 
+ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ 	struct tss_struct *tss;
++#endif
+ /*
+  * make sure the vm86() system call doesn't try to do anything silly
+  */
+@@ -324,12 +332,16 @@
+ 	savesegment(fs, tsk->thread.saved_fs);
+ 	tsk->thread.saved_gs = info->regs32->xgs;
+ 
++#ifndef CONFIG_X86_NO_TSS
+ 	tss = &per_cpu(init_tss, get_cpu());
++#endif
+ 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ 	if (cpu_has_sep)
+ 		tsk->thread.sysenter_cs = 0;
+ 	load_esp0(tss, &tsk->thread);
++#ifndef CONFIG_X86_NO_TSS
+ 	put_cpu();
++#endif
+ 
+ 	tsk->thread.screen_bitmap = info->screen_bitmap;
+ 	if (info->flags & VM86_SCREEN_BITMAP)
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
+--- a/arch/i386/kernel/vmlinux.lds.S	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/kernel/vmlinux.lds.S	2007-03-14 10:55:14.000000000 +0100
+@@ -35,7 +35,13 @@
+ }
+ SECTIONS
+ {
++/* Xen i386 redefines LOAD_OFFSET to zero in page.h
++   quintela at redhat.com */
++#ifdef CONFIG_XEN
++  . = __PAGE_OFFSET + LOAD_PHYSICAL_ADDR;
++#else
+   . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
++#endif
+   phys_startup_32 = startup_32 - LOAD_OFFSET;
+   /* read-only */
+   .text : AT(ADDR(.text) - LOAD_OFFSET) {
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/kernel/vsyscall-note-xen.S b/arch/i386/kernel/vsyscall-note-xen.S
+--- a/arch/i386/kernel/vsyscall-note-xen.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/kernel/vsyscall-note-xen.S	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,32 @@
++/*
++ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
++ * Here we can supply some information useful to userland.
++ * First we get the vanilla i386 note that supplies the kernel version info.
++ */
 +
-+void xen_new_user_pt(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_NEW_USER_BASEPTR;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
++#include "vsyscall-note.S"
 +
-+void xen_tlb_flush(void)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+EXPORT_SYMBOL(xen_tlb_flush);
++/*
++ * Now we add a special note telling glibc's dynamic linker a fake hardware
++ * flavor that it will use to choose the search path for libraries in the
++ * same way it uses real hardware capabilities like "mmx".
++ * We supply "nosegneg" as the fake capability, to indicate that we
++ * do not like negative offsets in instructions using segment overrides,
++ * since we implement those inefficiently.  This makes it possible to
++ * install libraries optimized to avoid those access patterns somewhere
++ * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/file
++ * corresponding to the bits here is needed to make ldconfig work right.
++ * It should contain:
++ *	hwcap 0 nosegneg
++ * to match the mapping of bit to name that we give here.
++ */
++#define NOTE_KERNELCAP_BEGIN(ncaps, mask) \
++	ASM_ELF_NOTE_BEGIN(".note.kernelcap", "a", "GNU", 2) \
++	.long ncaps, mask
++#define NOTE_KERNELCAP(bit, name) \
++	.byte bit; .asciz name
++#define NOTE_KERNELCAP_END ASM_ELF_NOTE_END
++
++NOTE_KERNELCAP_BEGIN(1, 1)
++NOTE_KERNELCAP(1, "nosegneg")  /* Change 1 back to 0 when glibc is fixed! */
++NOTE_KERNELCAP_END
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/lib/delay.c b/arch/i386/lib/delay.c
+--- a/arch/i386/lib/delay.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/lib/delay.c	2007-03-14 10:55:14.000000000 +0100
+@@ -60,6 +60,7 @@
+ 	delay_fn = delay_tsc;
+ }
+ 
++#ifndef CONFIG_X86_XEN
+ int read_current_timer(unsigned long *timer_val)
+ {
+ 	if (delay_fn == delay_tsc) {
+@@ -68,7 +69,7 @@
+ 	}
+ 	return -1;
+ }
+-
++#endif
+ void __delay(unsigned long loops)
+ {
+ 	delay_fn(loops);
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/mach-xen/irqflags.c b/arch/i386/mach-xen/irqflags.c
+--- a/arch/i386/mach-xen/irqflags.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/mach-xen/irqflags.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,103 @@
++#include <linux/module.h>
++#include <linux/smp.h>
++#include <asm/irqflags.h>
++#include <asm/hypervisor.h>
 +
-+void xen_invlpg(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_INVLPG_LOCAL;
-+	op.arg1.linear_addr = ptr & PAGE_MASK;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+EXPORT_SYMBOL(xen_invlpg);
++/* interrupt control.. */
 +
-+#ifdef CONFIG_SMP
++/* 
++ * The use of 'barrier' in the following reflects their use as local-lock
++ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
++ * critical operations are executed. All critical operations must complete
++ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
++ * includes these barriers, for example.
++ */
 +
-+void xen_tlb_flush_all(void)
++unsigned long __raw_local_save_flags(void)
 +{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_TLB_FLUSH_ALL;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
++	struct vcpu_info *_vcpu;
++	unsigned long flags;
 +
-+void xen_tlb_flush_mask(cpumask_t *mask)
-+{
-+	struct mmuext_op op;
-+	if ( cpus_empty(*mask) )
-+		return;
-+	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-+	op.arg2.vcpumask = mask->bits;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
++	preempt_disable();
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
++	flags = _vcpu->evtchn_upcall_mask;
++	preempt_enable();
 +
-+void xen_invlpg_all(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_INVLPG_ALL;
-+	op.arg1.linear_addr = ptr & PAGE_MASK;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++	return flags;
 +}
++EXPORT_SYMBOL(__raw_local_save_flags);
 +
-+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
++void raw_local_irq_restore(unsigned long flags)
 +{
-+	struct mmuext_op op;
-+	if ( cpus_empty(*mask) )
-+		return;
-+	op.cmd = MMUEXT_INVLPG_MULTI;
-+	op.arg1.linear_addr = ptr & PAGE_MASK;
-+	op.arg2.vcpumask    = mask->bits;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+#endif /* CONFIG_SMP */
++	struct vcpu_info *_vcpu;
 +
-+void xen_pgd_pin(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+#ifdef CONFIG_X86_64
-+	op.cmd = MMUEXT_PIN_L4_TABLE;
-+#elif defined(CONFIG_X86_PAE)
-+	op.cmd = MMUEXT_PIN_L3_TABLE;
-+#else
-+	op.cmd = MMUEXT_PIN_L2_TABLE;
-+#endif
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++	preempt_disable();
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
++	if ((_vcpu->evtchn_upcall_mask = flags) == 0) {
++		barrier(); /* unmask then check (avoid races) */
++		if (unlikely(_vcpu->evtchn_upcall_pending))
++			force_evtchn_callback();
++		preempt_enable();
++	} else
++		preempt_enable_no_resched();
 +}
++EXPORT_SYMBOL(raw_local_irq_restore);
 +
-+void xen_pgd_unpin(unsigned long ptr)
++void raw_local_irq_disable(void)
 +{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_UNPIN_TABLE;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++	struct vcpu_info *_vcpu;
++
++	preempt_disable();
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
++	_vcpu->evtchn_upcall_mask = 1;
++	preempt_enable_no_resched();
 +}
++EXPORT_SYMBOL(raw_local_irq_disable);
 +
-+void xen_set_ldt(unsigned long ptr, unsigned long len)
++void raw_local_irq_enable(void)
 +{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_SET_LDT;
-+	op.arg1.linear_addr = ptr;
-+	op.arg2.nr_ents     = len;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++	struct vcpu_info *_vcpu;
++
++	preempt_disable();
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
++	_vcpu->evtchn_upcall_mask = 0;
++	barrier(); /* unmask then check (avoid races) */
++	if (unlikely(_vcpu->evtchn_upcall_pending))
++		force_evtchn_callback();
++	preempt_enable();
 +}
++EXPORT_SYMBOL(raw_local_irq_enable);
 +
 +/*
-+ * Bitmap is indexed by page number. If bit is set, the page is part of a
-+ * xen_create_contiguous_region() area of memory.
++ * For spinlocks, etc.:
 + */
-+unsigned long *contiguous_bitmap;
 +
-+static void contiguous_bitmap_set(
-+	unsigned long first_page, unsigned long nr_pages)
++unsigned long __raw_local_irq_save(void)
 +{
-+	unsigned long start_off, end_off, curr_idx, end_idx;
++	struct vcpu_info *_vcpu;
++	unsigned long flags;
 +
-+	curr_idx  = first_page / BITS_PER_LONG;
-+	start_off = first_page & (BITS_PER_LONG-1);
-+	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
-+	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
++	preempt_disable();
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
++	flags = _vcpu->evtchn_upcall_mask;
++	_vcpu->evtchn_upcall_mask = 1;
++	preempt_enable_no_resched();
 +
-+	if (curr_idx == end_idx) {
-+		contiguous_bitmap[curr_idx] |=
-+			((1UL<<end_off)-1) & -(1UL<<start_off);
-+	} else {
-+		contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
-+		while ( ++curr_idx < end_idx )
-+			contiguous_bitmap[curr_idx] = ~0UL;
-+		contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
-+	}
++	return flags;
 +}
++EXPORT_SYMBOL(__raw_local_irq_save);
 +
-+static void contiguous_bitmap_clear(
-+	unsigned long first_page, unsigned long nr_pages)
++/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
++int raw_irqs_disabled(void)
 +{
-+	unsigned long start_off, end_off, curr_idx, end_idx;
++	struct vcpu_info *_vcpu;
++	int disabled;
 +
-+	curr_idx  = first_page / BITS_PER_LONG;
-+	start_off = first_page & (BITS_PER_LONG-1);
-+	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
-+	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
++	preempt_disable();
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
++	disabled = (_vcpu->evtchn_upcall_mask != 0);
++	preempt_enable_no_resched();
 +
-+	if (curr_idx == end_idx) {
-+		contiguous_bitmap[curr_idx] &=
-+			-(1UL<<end_off) | ((1UL<<start_off)-1);
-+	} else {
-+		contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
-+		while ( ++curr_idx != end_idx )
-+			contiguous_bitmap[curr_idx] = 0;
-+		contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
-+	}
++	return disabled;
 +}
++EXPORT_SYMBOL(raw_irqs_disabled);
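
One point worth spelling out about the irqflags replacement above: a Xen guest
cannot toggle event delivery with cli/sti, so the local_irq_*() primitives are
rebuilt around a per-VCPU evtchn_upcall_mask byte shared with the hypervisor.
The ordering is load-bearing: the mask must be cleared before
evtchn_upcall_pending is tested, otherwise an event arriving between the two
is lost (raw_local_irq_disable() needs no such check, since masking can only
defer events, not drop them). Below is a minimal user-space model of that
unmask-then-check pattern; the struct and field names mirror the patch, but
nothing here is the real hypervisor interface, and force_evtchn_callback() is
only a stand-in for the replaying hypercall.

/* User-space model of the unmask-then-check pattern; illustrative only. */
#include <stdio.h>

struct vcpu_info {
	volatile unsigned char evtchn_upcall_mask;    /* 1 = delivery masked */
	volatile unsigned char evtchn_upcall_pending; /* 1 = event waiting   */
};

static struct vcpu_info vcpu;

static void force_evtchn_callback(void)
{
	/* Stand-in for the hypercall that replays the pending upcall. */
	vcpu.evtchn_upcall_pending = 0;
	printf("replayed pending event\n");
}

static void local_irq_enable_model(void)
{
	vcpu.evtchn_upcall_mask = 0;
	__asm__ __volatile__("" ::: "memory"); /* barrier(): unmask before checking */
	if (vcpu.evtchn_upcall_pending)
		force_evtchn_callback();
}

int main(void)
{
	vcpu.evtchn_upcall_mask = 1;
	vcpu.evtchn_upcall_pending = 1; /* event arrived while masked */
	local_irq_enable_model();       /* must notice and replay it  */
	return 0;
}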
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/mach-xen/Makefile b/arch/i386/mach-xen/Makefile
+--- a/arch/i386/mach-xen/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/mach-xen/Makefile	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,5 @@
++#
++# Makefile for the linux kernel.
++#
 +
-+/* Protected by balloon_lock. */
-+#define MAX_CONTIG_ORDER 9 /* 2MB */
-+static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
-+static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
++obj-y				:= setup.o irqflags.o
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/mach-xen/setup.c b/arch/i386/mach-xen/setup.c
+--- a/arch/i386/mach-xen/setup.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/mach-xen/setup.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,161 @@
++/*
++ *	Machine specific setup for generic
++ */
 +
-+/* Ensure multi-page extents are contiguous in machine memory. */
-+int xen_create_contiguous_region(
-+	unsigned long vstart, unsigned int order, unsigned int address_bits)
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <asm/acpi.h>
++#include <asm/arch_hooks.h>
++#include <asm/e820.h>
++#include <asm/setup.h>
++#include <asm/fixmap.h>
++
++#include <xen/interface/callback.h>
++#include <xen/interface/memory.h>
++
++#ifdef CONFIG_HOTPLUG_CPU
++#define DEFAULT_SEND_IPI	(1)
++#else
++#define DEFAULT_SEND_IPI	(0)
++#endif
++
++int no_broadcast=DEFAULT_SEND_IPI;
++
++static __init int no_ipi_broadcast(char *str)
 +{
-+	unsigned long *in_frames = discontig_frames, out_frame;
-+	unsigned long  frame, i, flags;
-+	long           rc;
-+	int            success;
-+	struct xen_memory_exchange exchange = {
-+		.in = {
-+			.nr_extents   = 1UL << order,
-+			.extent_order = 0,
-+			.domid        = DOMID_SELF
-+		},
-+		.out = {
-+			.nr_extents   = 1,
-+			.extent_order = order,
-+			.address_bits = address_bits,
-+			.domid        = DOMID_SELF
-+		}
-+	};
++	get_option(&str, &no_broadcast);
++	printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
++											"IPI Broadcast");
++	return 1;
++}
 +
-+	/*
-+	 * Currently an auto-translated guest will not perform I/O, nor will
-+	 * it require PAE page directories below 4GB. Therefore any calls to
-+	 * this function are redundant and can be ignored.
-+	 */
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		return 0;
++__setup("no_ipi_broadcast", no_ipi_broadcast);
 +
-+	if (unlikely(order > MAX_CONTIG_ORDER))
-+		return -ENOMEM;
++static int __init print_ipi_mode(void)
++{
++	printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
++											"Shortcut");
++	return 0;
++}
 +
-+	set_xen_guest_handle(exchange.in.extent_start, in_frames);
-+	set_xen_guest_handle(exchange.out.extent_start, &out_frame);
++late_initcall(print_ipi_mode);
 +
-+	scrub_pages(vstart, 1 << order);
++/**
++ * machine_specific_memory_setup - Hook for machine specific memory setup.
++ *
++ * Description:
++ *	This is included late in kernel/setup.c so that it can make
++ *	use of all of the static functions.
++ **/
 +
-+	balloon_lock(flags);
++char * __init machine_specific_memory_setup(void)
++{
++	int rc;
++	struct xen_memory_map memmap;
++	/*
++	 * This is rather large for a stack variable but this early in
++	 * the boot process we know we have plenty of slack space.
++	 */
++	struct e820entry map[E820MAX];
 +
-+	/* 1. Zap current PTEs, remembering MFNs. */
-+	for (i = 0; i < (1UL<<order); i++) {
-+		in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
-+		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
-+					__pte_ma(0), 0);
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
-+			INVALID_P2M_ENTRY);
-+	}
-+	if (HYPERVISOR_multicall(cr_mcl, i))
-+		BUG();
++	memmap.nr_entries = E820MAX;
++	set_xen_guest_handle(memmap.buffer, map);
 +
-+	/* 2. Get a new contiguous memory extent. */
-+	out_frame = __pa(vstart) >> PAGE_SHIFT;
-+	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
-+	success = (exchange.nr_exchanged == (1UL << order));
-+	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
-+	BUG_ON(success && (rc != 0));
-+	if (unlikely(rc == -ENOSYS)) {
-+		/* Compatibility when XENMEM_exchange is unsupported. */
-+		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+					 &exchange.in) != (1UL << order))
-+			BUG();
-+		success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+						&exchange.out) == 1);
-+		if (!success) {
-+			/* Couldn't get special memory: fall back to normal. */
-+			for (i = 0; i < (1UL<<order); i++)
-+				in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
-+			if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+						 &exchange.in) != (1UL<<order))
-+				BUG();
-+		}
++	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
++	if ( rc == -ENOSYS ) {
++		memmap.nr_entries = 1;
++		map[0].addr = 0ULL;
++		map[0].size = PFN_PHYS(xen_start_info->nr_pages);
++		/* 8MB slack (to balance backend allocations). */
++		map[0].size += 8ULL << 20;
++		map[0].type = E820_RAM;
++		rc = 0;
 +	}
++	BUG_ON(rc);
 +
-+	/* 3. Map the new extent in place of old pages. */
-+	for (i = 0; i < (1UL<<order); i++) {
-+		frame = success ? (out_frame + i) : in_frames[i];
-+		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
-+					pfn_pte_ma(frame, PAGE_KERNEL), 0);
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
-+	}
++	sanitize_e820_map(map, (char *)&memmap.nr_entries);
 +
-+	cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
-+						   ? UVMF_TLB_FLUSH|UVMF_ALL
-+						   : UVMF_INVLPG|UVMF_ALL;
-+	if (HYPERVISOR_multicall(cr_mcl, i))
-+		BUG();
++	BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
 +
-+	if (success)
-+		contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
-+				      1UL << order);
++	return "Xen";
++}
 +
-+	balloon_unlock(flags);
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
 +
-+	return success ? 0 : -ENOMEM;
-+}
++unsigned long *machine_to_phys_mapping;
++EXPORT_SYMBOL(machine_to_phys_mapping);
++unsigned int machine_to_phys_order;
++EXPORT_SYMBOL(machine_to_phys_order);
 +
-+void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++void __init machine_specific_arch_setup(void)
 +{
-+	unsigned long *out_frames = discontig_frames, in_frame;
-+	unsigned long  frame, i, flags;
-+	long           rc;
-+	int            success;
-+	struct xen_memory_exchange exchange = {
-+		.in = {
-+			.nr_extents   = 1,
-+			.extent_order = order,
-+			.domid        = DOMID_SELF
-+		},
-+		.out = {
-+			.nr_extents   = 1UL << order,
-+			.extent_order = 0,
-+			.domid        = DOMID_SELF
-+		}
++	int ret;
++	struct xen_machphys_mapping mapping;
++	unsigned long machine_to_phys_nr_ents;
++	struct xen_platform_parameters pp;
++	struct callback_register event = {
++		.type = CALLBACKTYPE_event,
++		.address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
++	};
++	struct callback_register failsafe = {
++		.type = CALLBACKTYPE_failsafe,
++		.address = { __KERNEL_CS, (unsigned long)failsafe_callback },
++	};
++	struct callback_register nmi_cb = {
++		.type = CALLBACKTYPE_nmi,
++		.address = { __KERNEL_CS, (unsigned long)nmi },
 +	};
 +
-+	if (xen_feature(XENFEAT_auto_translated_physmap) ||
-+	    !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
-+		return;
-+
-+	if (unlikely(order > MAX_CONTIG_ORDER))
-+		return;
++	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
++	if (ret == 0)
++		ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
++	if (ret == -ENOSYS)
++		ret = HYPERVISOR_set_callbacks(
++			event.address.cs, event.address.eip,
++			failsafe.address.cs, failsafe.address.eip);
++	BUG_ON(ret);
 +
-+	set_xen_guest_handle(exchange.in.extent_start, &in_frame);
-+	set_xen_guest_handle(exchange.out.extent_start, out_frames);
++	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
++	if (ret == -ENOSYS) {
++		struct xennmi_callback cb;
 +
-+	scrub_pages(vstart, 1 << order);
++		cb.handler_address = nmi_cb.address.eip;
++		HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++	}
 +
-+	balloon_lock(flags);
++	if (HYPERVISOR_xen_version(XENVER_platform_parameters,
++				   &pp) == 0)
++		set_fixaddr_top(pp.virt_start - PAGE_SIZE);
 +
-+	contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
++	machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
++	machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
++	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
++		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
++		machine_to_phys_nr_ents = mapping.max_mfn + 1;
++	}
++	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents)
++		machine_to_phys_order++;
++}
 +
-+	/* 1. Find start MFN of contiguous extent. */
-+	in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
++/**
++ * pre_setup_arch_hook - hook called prior to any setup_arch() execution
++ *
++ * Description:
++ *	generally used to activate any machine specific identification
++ *	routines that may be needed before setup_arch() runs.  On VISWS
++ *	this is used to get the board revision and type.
++ **/
++void __init pre_setup_arch_hook(void)
++{
++	int max_cmdline;
 +
-+	/* 2. Zap current PTEs. */
-+	for (i = 0; i < (1UL<<order); i++) {
-+		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
-+					__pte_ma(0), 0);
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
-+			INVALID_P2M_ENTRY);
-+		out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
-+	}
-+	if (HYPERVISOR_multicall(cr_mcl, i))
-+		BUG();
++	if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++		max_cmdline = COMMAND_LINE_SIZE;
++	memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
++	/* Save unparsed command line copy for /proc/cmdline */
++	saved_command_line[max_cmdline-1] = '\0';
++}
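
machine_specific_arch_setup() above ends by computing machine_to_phys_order,
the smallest order with (1UL << order) >= machine_to_phys_nr_ents, i.e.
ceil(log2(nr_ents)). A standalone sketch of the same loop, with a few sanity
checks; plain C, no kernel context assumed:

#include <assert.h>

static unsigned int order_for(unsigned long nr_ents)
{
	unsigned int order = 0;

	/* Smallest order such that (1UL << order) >= nr_ents. */
	while ((1UL << order) < nr_ents)
		order++;
	return order;
}

int main(void)
{
	assert(order_for(1) == 0);
	assert(order_for(2) == 1);
	assert(order_for(3) == 2);          /* rounds up, not down */
	assert(order_for(1UL << 20) == 20);
	return 0;
}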
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/Makefile b/arch/i386/Makefile
+--- a/arch/i386/Makefile	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/Makefile	2007-03-14 10:55:14.000000000 +0100
+@@ -60,6 +60,11 @@
+ 
+ CFLAGS += $(cflags-y)
+ 
++cppflags-$(CONFIG_XEN) += \
++	-D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
 +
-+	/* 3. Do the exchange for non-contiguous MFNs. */
-+	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
-+	success = (exchange.nr_exchanged == 1);
-+	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
-+	BUG_ON(success && (rc != 0));
-+	if (unlikely(rc == -ENOSYS)) {
-+		/* Compatibility when XENMEM_exchange is unsupported. */
-+		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+					 &exchange.in) != 1)
-+			BUG();
-+		if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+					 &exchange.out) != (1UL << order))
-+			BUG();
-+		success = 1;
-+	}
++CPPFLAGS += $(cppflags-y)
 +
-+	/* 4. Map new pages in place of old pages. */
-+	for (i = 0; i < (1UL<<order); i++) {
-+		frame = success ? out_frames[i] : (in_frame + i);
-+		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
-+					pfn_pte_ma(frame, PAGE_KERNEL), 0);
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
-+	}
+ # Default subarch .c files
+ mcore-y  := mach-default
+ 
+@@ -83,6 +88,10 @@
+ mflags-$(CONFIG_X86_SUMMIT) := -Iinclude/asm-i386/mach-summit
+ mcore-$(CONFIG_X86_SUMMIT)  := mach-default
+ 
++# Xen subarch support
++mflags-$(CONFIG_X86_XEN)	:= -Iinclude/asm-i386/mach-xen
++mcore-$(CONFIG_X86_XEN)		:= mach-xen
 +
-+	cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
-+						   ? UVMF_TLB_FLUSH|UVMF_ALL
-+						   : UVMF_INVLPG|UVMF_ALL;
-+	if (HYPERVISOR_multicall(cr_mcl, i))
-+		BUG();
+ # generic subarchitecture
+ mflags-$(CONFIG_X86_GENERICARCH) := -Iinclude/asm-i386/mach-generic
+ mcore-$(CONFIG_X86_GENERICARCH) := mach-default
+@@ -117,6 +126,19 @@
+ PHONY += zImage bzImage compressed zlilo bzlilo \
+          zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
+ 
++ifdef CONFIG_XEN
++CPPFLAGS := -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
++head-y := arch/i386/kernel/head-xen.o arch/i386/kernel/init_task-xen.o
++boot := arch/i386/boot-xen
++.PHONY: vmlinuz
++all: vmlinuz
 +
-+	balloon_unlock(flags);
-+}
++vmlinuz: vmlinux
++	$(Q)$(MAKE) $(build)=$(boot) $@
 +
-+#ifdef __i386__
-+int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
-+{
-+	__u32 *lp = (__u32 *)((char *)ldt + entry * 8);
-+	maddr_t mach_lp = arbitrary_virt_to_machine(lp);
-+	return HYPERVISOR_update_descriptor(
-+		mach_lp, (u64)entry_a | ((u64)entry_b<<32));
-+}
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/mm/init-xen.c linux-2.6.18-xen/arch/i386/mm/init-xen.c
---- linux-2.6.18.3/arch/i386/mm/init-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/mm/init-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,851 @@
++install:
++	$(Q)$(MAKE) $(build)=$(boot) XENGUEST=$(XENGUEST) $@
++else
+ all: bzImage
+ 
+ # KBUILD_IMAGE specify target image being built
+@@ -139,6 +161,7 @@
+ 
+ install:
+ 	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
++endif
+ 
+ archclean:
+ 	$(Q)$(MAKE) $(clean)=arch/i386/boot
+@@ -157,3 +180,4 @@
+ CLEAN_FILES += arch/$(ARCH)/boot/fdimage \
+ 	       arch/$(ARCH)/boot/image.iso \
+ 	       arch/$(ARCH)/boot/mtools.conf
++CLEAN_FILES += vmlinuz vmlinux-stripped
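
The Makefile hunk above exports CONFIG_XEN_INTERFACE_VERSION to the
preprocessor as __XEN_INTERFACE_VERSION__; the public Xen interface headers
compare against that macro to choose between current and backward-compatible
declarations. Roughly as in the following sketch; the version numbers here
are hypothetical and are not taken from the headers:

#include <stdio.h>

#ifndef __XEN_INTERFACE_VERSION__
#define __XEN_INTERFACE_VERSION__ 0x00030205 /* hypothetical default */
#endif

int main(void)
{
#if __XEN_INTERFACE_VERSION__ >= 0x00030203
	printf("using current interface declarations\n");
#else
	printf("using compatibility declarations\n");
#endif
	return 0;
}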
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/mm/fault-xen.c b/arch/i386/mm/fault-xen.c
+--- a/arch/i386/mm/fault-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/mm/fault-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,762 @@
 +/*
-+ *  linux/arch/i386/mm/init.c
++ *  linux/arch/i386/mm/fault.c
 + *
 + *  Copyright (C) 1995  Linus Torvalds
-+ *
-+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 + */
 +
-+#include <linux/module.h>
 +#include <linux/signal.h>
 +#include <linux/sched.h>
 +#include <linux/kernel.h>
@@ -17863,3832 +13673,4047 @@
 +#include <linux/ptrace.h>
 +#include <linux/mman.h>
 +#include <linux/mm.h>
-+#include <linux/hugetlb.h>
-+#include <linux/swap.h>
 +#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
 +#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h>		/* For unblank_screen() */
 +#include <linux/highmem.h>
-+#include <linux/pagemap.h>
-+#include <linux/poison.h>
-+#include <linux/bootmem.h>
-+#include <linux/slab.h>
-+#include <linux/proc_fs.h>
-+#include <linux/efi.h>
-+#include <linux/memory_hotplug.h>
-+#include <linux/initrd.h>
-+#include <linux/cpumask.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/scatterlist.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
++#include <linux/uaccess.h>
 +
-+#include <asm/processor.h>
 +#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/dma.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/apic.h>
-+#include <asm/tlb.h>
-+#include <asm/tlbflush.h>
-+#include <asm/sections.h>
-+#include <asm/hypervisor.h>
-+#include <asm/swiotlb.h>
-+
-+extern unsigned long *contiguous_bitmap;
++#include <asm/desc.h>
++#include <asm/kdebug.h>
++#include <asm/segment.h>
 +
-+unsigned int __VMALLOC_RESERVE = 128 << 20;
++extern void die(const char *,struct pt_regs *,long);
 +
-+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-+unsigned long highstart_pfn, highend_pfn;
++static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
 +
-+static int noinline do_test_wp_bit(void);
++int register_page_fault_notifier(struct notifier_block *nb)
++{
++	vmalloc_sync_all();
++	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++}
++EXPORT_SYMBOL_GPL(register_page_fault_notifier);
 +
-+/*
-+ * Creates a middle page table and puts a pointer to it in the
-+ * given global directory entry. This only returns the gd entry
-+ * in non-PAE compilation mode, since the middle layer is folded.
-+ */
-+static pmd_t * __init one_md_table_init(pgd_t *pgd)
++int unregister_page_fault_notifier(struct notifier_block *nb)
 +{
-+	pud_t *pud;
-+	pmd_t *pmd_table;
-+		
-+#ifdef CONFIG_X86_PAE
-+	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-+	make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
-+	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
-+	pud = pud_offset(pgd, 0);
-+	if (pmd_table != pmd_offset(pud, 0)) 
-+		BUG();
-+#else
-+	pud = pud_offset(pgd, 0);
-+	pmd_table = pmd_offset(pud, 0);
-+#endif
++	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
++}
++EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
 +
-+	return pmd_table;
++static inline int notify_page_fault(enum die_val val, const char *str,
++			struct pt_regs *regs, long err, int trap, int sig)
++{
++	struct die_args args = {
++		.regs = regs,
++		.str = str,
++		.err = err,
++		.trapnr = trap,
++		.signr = sig
++	};
++	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
 +}
 +
 +/*
-+ * Create a page table and place a pointer to it in a middle page
-+ * directory entry.
++ * Unlock any spinlocks which will prevent us from getting the
++ * message out 
 + */
-+static pte_t * __init one_page_table_init(pmd_t *pmd)
++void bust_spinlocks(int yes)
 +{
-+	if (pmd_none(*pmd)) {
-+		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-+		make_lowmem_page_readonly(page_table,
-+					  XENFEAT_writable_page_tables);
-+		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
-+		if (page_table != pte_offset_kernel(pmd, 0))
-+			BUG();	
++	int loglevel_save = console_loglevel;
 +
-+		return page_table;
++	if (yes) {
++		oops_in_progress = 1;
++		return;
 +	}
-+	
-+	return pte_offset_kernel(pmd, 0);
++#ifdef CONFIG_VT
++	unblank_screen();
++#endif
++	oops_in_progress = 0;
++	/*
++	 * OK, the message is on the console.  Now we call printk()
++	 * without oops_in_progress set so that printk will give klogd
++	 * a poke.  Hold onto your hats...
++	 */
++	console_loglevel = 15;		/* NMI oopser may have shut the console up */
++	printk(" ");
++	console_loglevel = loglevel_save;
 +}
 +
 +/*
-+ * This function initializes a certain range of kernel virtual memory 
-+ * with new bootmem page tables, everywhere page tables are missing in
-+ * the given range.
-+ */
-+
-+/*
-+ * NOTE: The pagetables are allocated contiguous on the physical space 
-+ * so we can cache the place of the first one and move around without 
-+ * checking the pgd every time.
++ * Return EIP plus the CS segment base.  The segment limit is also
++ * adjusted, clamped to the kernel/user address space (whichever is
++ * appropriate), and returned in *eip_limit.
++ *
++ * The segment is checked, because it might have been changed by another
++ * task between the original faulting instruction and here.
++ *
++ * If CS is no longer a valid code segment, or if EIP is beyond the
++ * limit, or if it is a kernel address when CS is not a kernel segment,
++ * then the returned value will be greater than *eip_limit.
++ * 
++ * This is slow, but is very rarely executed.
 + */
-+static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
++static inline unsigned long get_segment_eip(struct pt_regs *regs,
++					    unsigned long *eip_limit)
 +{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	int pgd_idx, pmd_idx;
-+	unsigned long vaddr;
++	unsigned long eip = regs->eip;
++	unsigned seg = regs->xcs & 0xffff;
++	u32 seg_ar, seg_limit, base, *desc;
 +
-+	vaddr = start;
-+	pgd_idx = pgd_index(vaddr);
-+	pmd_idx = pmd_index(vaddr);
-+	pgd = pgd_base + pgd_idx;
++	/* Unlikely, but must come before segment checks. */
++	if (unlikely(regs->eflags & VM_MASK)) {
++		base = seg << 4;
++		*eip_limit = base + 0xffff;
++		return base + (eip & 0xffff);
++	}
 +
-+	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
-+		if (pgd_none(*pgd)) 
-+			one_md_table_init(pgd);
-+		pud = pud_offset(pgd, vaddr);
-+		pmd = pmd_offset(pud, vaddr);
-+		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
-+			if (vaddr < HYPERVISOR_VIRT_START && pmd_none(*pmd)) 
-+				one_page_table_init(pmd);
++	/* The standard kernel/user address space limit. */
++	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
++	
++	/* By far the most common cases. */
++	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
++		return eip;
 +
-+			vaddr += PMD_SIZE;
-+		}
-+		pmd_idx = 0;
++	/* Check the segment exists, is within the current LDT/GDT size,
++	   that kernel/user (ring 0..3) has the appropriate privilege,
++	   that it's a code segment, and get the limit. */
++	__asm__ ("larl %3,%0; lsll %3,%1"
++		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
++	if ((~seg_ar & 0x9800) || eip > seg_limit) {
++		*eip_limit = 0;
++		return 1;	 /* So that returned eip > *eip_limit. */
 +	}
-+}
 +
-+static inline int is_kernel_text(unsigned long addr)
-+{
-+	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
-+		return 1;
-+	return 0;
-+}
++	/* Get the GDT/LDT descriptor base. 
++	   When you look for races in this code remember that
++	   LDT and other horrors are only used in user space. */
++	if (seg & (1<<2)) {
++		/* Must lock the LDT while reading it. */
++		down(&current->mm->context.sem);
++		desc = current->mm->context.ldt;
++		desc = (void *)desc + (seg & ~7);
++	} else {
++		/* Must disable preemption while reading the GDT. */
++ 		desc = (u32 *)get_cpu_gdt_table(get_cpu());
++		desc = (void *)desc + (seg & ~7);
++	}
 +
-+/*
-+ * This maps the physical memory to kernel virtual address space, a total 
-+ * of max_low_pfn pages, by creating page tables starting from address 
-+ * PAGE_OFFSET.
-+ */
-+static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
-+{
-+	unsigned long pfn;
-+	pgd_t *pgd;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	int pgd_idx, pmd_idx, pte_ofs;
++	/* Decode the code segment base from the descriptor */
++	base = get_desc_base((unsigned long *)desc);
 +
-+	unsigned long max_ram_pfn = xen_start_info->nr_pages;
-+	if (max_ram_pfn > max_low_pfn)
-+		max_ram_pfn = max_low_pfn;
++	if (seg & (1<<2)) { 
++		up(&current->mm->context.sem);
++	} else
++		put_cpu();
 +
-+	pgd_idx = pgd_index(PAGE_OFFSET);
-+	pgd = pgd_base + pgd_idx;
-+	pfn = 0;
-+	pmd_idx = pmd_index(PAGE_OFFSET);
-+	pte_ofs = pte_index(PAGE_OFFSET);
++	/* Adjust EIP and segment limit, and clamp at the kernel limit.
++	   It's legitimate for segments to wrap at 0xffffffff. */
++	seg_limit += base;
++	if (seg_limit < *eip_limit && seg_limit >= base)
++		*eip_limit = seg_limit;
++	return eip + base;
++}
 +
-+	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
-+#ifdef CONFIG_XEN
-+		/*
-+		 * Native linux hasn't PAE-paging enabled yet at this
-+		 * point.  When running as xen domain we are in PAE
-+		 * mode already, thus we can't simply hook a empty
-+		 * pmd.  That would kill the mappings we are currently
-+		 * using ...
-+		 */
-+		pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
-+#else
-+		pmd = one_md_table_init(pgd);
-+#endif
-+		if (pfn >= max_low_pfn)
-+			continue;
-+		pmd += pmd_idx;
-+		for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
-+			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
-+			if (address >= HYPERVISOR_VIRT_START)
-+				continue;
++/* 
++ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
++ * Check that here and ignore it.
++ */
++static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
++{ 
++	unsigned long limit;
++	unsigned char *instr = (unsigned char *)get_segment_eip (regs, &limit);
++	int scan_more = 1;
++	int prefetch = 0; 
++	int i;
 +
-+			/* Map with big pages if possible, otherwise create normal page tables. */
-+			if (cpu_has_pse) {
-+				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
++	for (i = 0; scan_more && i < 15; i++) { 
++		unsigned char opcode;
++		unsigned char instr_hi;
++		unsigned char instr_lo;
 +
-+				if (is_kernel_text(address) || is_kernel_text(address2))
-+					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
-+				else
-+					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
-+				pfn += PTRS_PER_PTE;
-+			} else {
-+				pte = one_page_table_init(pmd);
++		if (instr > (unsigned char *)limit)
++			break;
++		if (probe_kernel_address(instr, opcode))
++			break; 
 +
-+				pte += pte_ofs;
-+				for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
-+						/* XEN: Only map initial RAM allocation. */
-+						if ((pfn >= max_ram_pfn) || pte_present(*pte))
-+							continue;
-+						if (is_kernel_text(address))
-+							set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
-+						else
-+							set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
-+				}
-+				pte_ofs = 0;
-+			}
-+		}
-+		pmd_idx = 0;
++		instr_hi = opcode & 0xf0; 
++		instr_lo = opcode & 0x0f; 
++		instr++;
++
++		switch (instr_hi) { 
++		case 0x20:
++		case 0x30:
++			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
++			scan_more = ((instr_lo & 7) == 0x6);
++			break;
++			
++		case 0x60:
++			/* 0x64 thru 0x67 are valid prefixes in all modes. */
++			scan_more = (instr_lo & 0xC) == 0x4;
++			break;		
++		case 0xF0:
++			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
++			scan_more = !instr_lo || (instr_lo>>1) == 1;
++			break;			
++		case 0x00:
++			/* Prefetch instruction is 0x0F0D or 0x0F18 */
++			scan_more = 0;
++			if (instr > (unsigned char *)limit)
++				break;
++			if (probe_kernel_address(instr, opcode))
++				break;
++			prefetch = (instr_lo == 0xF) &&
++				(opcode == 0x0D || opcode == 0x18);
++			break;			
++		default:
++			scan_more = 0;
++			break;
++		} 
 +	}
++	return prefetch;
 +}
 +
-+#ifndef CONFIG_XEN
-+
-+static inline int page_kills_ppro(unsigned long pagenr)
++static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++			      unsigned long error_code)
 +{
-+	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
-+		return 1;
++	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++		     boot_cpu_data.x86 >= 6)) {
++		/* Catch an obscure case of prefetch inside an NX page. */
++		if (nx_enabled && (error_code & 16))
++			return 0;
++		return __is_prefetch(regs, addr);
++	}
 +	return 0;
-+}
-+
-+#else
++} 
 +
-+#define page_kills_ppro(p)	0
++static noinline void force_sig_info_fault(int si_signo, int si_code,
++	unsigned long address, struct task_struct *tsk)
++{
++	siginfo_t info;
 +
-+#endif
++	info.si_signo = si_signo;
++	info.si_errno = 0;
++	info.si_code = si_code;
++	info.si_addr = (void __user *)address;
++	force_sig_info(si_signo, &info, tsk);
++}
 +
-+extern int is_available_memory(efi_memory_desc_t *);
++fastcall void do_invalid_op(struct pt_regs *, unsigned long);
 +
-+int page_is_ram(unsigned long pagenr)
++#ifdef CONFIG_X86_PAE
++static void dump_fault_path(unsigned long address)
 +{
-+	int i;
-+	unsigned long addr, end;
-+
-+	if (efi_enabled) {
-+		efi_memory_desc_t *md;
-+		void *p;
-+
-+		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-+			md = p;
-+			if (!is_available_memory(md))
-+				continue;
-+			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-+			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
++	unsigned long *p, page;
++	unsigned long mfn; 
 +
-+			if ((pagenr >= addr) && (pagenr < end))
-+				return 1;
++	page = read_cr3();
++	p  = (unsigned long *)__va(page);
++	p += (address >> 30) * 2;
++	printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
++	if (p[0] & 1) {
++		mfn  = (p[0] >> PAGE_SHIFT) | ((p[1] & 0x7) << 20); 
++		page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
++		p  = (unsigned long *)__va(page);
++		address &= 0x3fffffff;
++		p += (address >> 21) * 2;
++		printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n", 
++		       page, p[1], p[0]);
++#ifndef CONFIG_HIGHPTE
++		if (p[0] & 1) {
++			mfn  = (p[0] >> PAGE_SHIFT) | ((p[1] & 0x7) << 20); 
++			page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
++			p  = (unsigned long *) __va(page);
++			address &= 0x001fffff;
++			p += (address >> 12) * 2;
++			printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
++			       page, p[1], p[0]);
 +		}
-+		return 0;
++#endif
 +	}
++}
++#else
++static void dump_fault_path(unsigned long address)
++{
++	unsigned long page;
 +
-+	for (i = 0; i < e820.nr_map; i++) {
-+
-+		if (e820.map[i].type != E820_RAM)	/* not usable memory */
-+			continue;
-+		/*
-+		 *	!!!FIXME!!! Some BIOSen report areas as RAM that
-+		 *	are not. Notably the 640->1Mb area. We need a sanity
-+		 *	check here.
-+		 */
-+		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-+		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
-+		if  ((pagenr >= addr) && (pagenr < end))
-+			return 1;
++	page = read_cr3();
++	page = ((unsigned long *) __va(page))[address >> 22];
++	if (oops_may_print())
++		printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
++		       machine_to_phys(page));
++	/*
++	 * We must not directly access the pte in the highpte
++	 * case, the page table might be allocated in highmem.
++	 * And lets rather not kmap-atomic the pte, just in case
++	 * it's allocated already.
++	 */
++#ifndef CONFIG_HIGHPTE
++	if ((page & 1) && oops_may_print()) {
++		page &= PAGE_MASK;
++		address &= 0x003ff000;
++		page = machine_to_phys(page);
++		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
++		printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
++		       machine_to_phys(page));
 +	}
-+	return 0;
++#endif
 +}
++#endif
 +
-+#ifdef CONFIG_HIGHMEM
-+pte_t *kmap_pte;
-+pgprot_t kmap_prot;
++static int spurious_fault(struct pt_regs *regs,
++			  unsigned long address,
++			  unsigned long error_code)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
 +
-+#define kmap_get_fixmap_pte(vaddr)					\
-+	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
++#ifdef CONFIG_XEN
++	/* Faults in hypervisor area are never spurious. */
++	if (address >= HYPERVISOR_VIRT_START)
++		return 0;
++#endif
 +
-+static void __init kmap_init(void)
-+{
-+	unsigned long kmap_vstart;
++	/* Reserved-bit violation or user access to kernel space? */
++	if (error_code & 0x0c)
++		return 0;
 +
-+	/* cache the first kmap pte */
-+	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-+	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
++	pgd = init_mm.pgd + pgd_index(address);
++	if (!pgd_present(*pgd))
++		return 0;
 +
-+	kmap_prot = PAGE_KERNEL;
++	pud = pud_offset(pgd, address);
++	if (!pud_present(*pud))
++		return 0;
++
++	pmd = pmd_offset(pud, address);
++	if (!pmd_present(*pmd))
++		return 0;
++
++	pte = pte_offset_kernel(pmd, address);
++	if (!pte_present(*pte))
++		return 0;
++	if ((error_code & 0x02) && !pte_write(*pte))
++		return 0;
++#ifdef CONFIG_X86_PAE
++	if ((error_code & 0x10) && (pte_val(*pte) & _PAGE_NX))
++		return 0;
++#endif
++
++	return 1;
 +}
 +
-+static void __init permanent_kmaps_init(pgd_t *pgd_base)
++static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 +{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	unsigned long vaddr;
++	unsigned index = pgd_index(address);
++	pgd_t *pgd_k;
++	pud_t *pud, *pud_k;
++	pmd_t *pmd, *pmd_k;
++
++	pgd += index;
++	pgd_k = init_mm.pgd + index;
 +
-+	vaddr = PKMAP_BASE;
-+	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
++	if (!pgd_present(*pgd_k))
++		return NULL;
 +
-+	pgd = swapper_pg_dir + pgd_index(vaddr);
-+	pud = pud_offset(pgd, vaddr);
-+	pmd = pmd_offset(pud, vaddr);
-+	pte = pte_offset_kernel(pmd, vaddr);
-+	pkmap_page_table = pte;	
-+}
++	/*
++	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
++	 * and redundant with the set_pmd() on non-PAE. As would
++	 * set_pud.
++	 */
 +
-+static void __meminit free_new_highpage(struct page *page, int pfn)
-+{
-+	init_page_count(page);
-+	if (pfn < xen_start_info->nr_pages)
-+		__free_page(page);
-+	totalhigh_pages++;
-+}
++	pud = pud_offset(pgd, address);
++	pud_k = pud_offset(pgd_k, address);
++	if (!pud_present(*pud_k))
++		return NULL;
 +
-+void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
-+{
-+	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
-+		ClearPageReserved(page);
-+		free_new_highpage(page, pfn);
-+	} else
-+		SetPageReserved(page);
++	pmd = pmd_offset(pud, address);
++	pmd_k = pmd_offset(pud_k, address);
++	if (!pmd_present(*pmd_k))
++		return NULL;
++	if (!pmd_present(*pmd))
++#ifndef CONFIG_XEN
++		set_pmd(pmd, *pmd_k);
++#else
++		/*
++		 * When running on Xen we must launder *pmd_k through
++		 * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
++		 */
++		set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
++#endif
++	else
++		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++	return pmd_k;
 +}
 +
-+static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
++/*
++ * Handle a fault on the vmalloc or module mapping area
++ *
++ * This assumes no large pages in there.
++ */
++static inline int vmalloc_fault(unsigned long address)
 +{
-+	free_new_highpage(page, pfn);
-+	totalram_pages++;
-+#ifdef CONFIG_FLATMEM
-+	max_mapnr = max(pfn, max_mapnr);
-+#endif
-+	num_physpages++;
++	unsigned long pgd_paddr;
++	pmd_t *pmd_k;
++	pte_t *pte_k;
++	/*
++	 * Synchronize this task's top level page-table
++	 * with the 'reference' page table.
++	 *
++	 * Do _not_ use "current" here. We might be inside
++	 * an interrupt in the middle of a task switch..
++	 */
++	pgd_paddr = read_cr3();
++	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
++	if (!pmd_k)
++		return -1;
++	pte_k = pte_offset_kernel(pmd_k, address);
++	if (!pte_present(*pte_k))
++		return -1;
 +	return 0;
 +}
 +
 +/*
-+ * Not currently handling the NUMA case.
-+ * Assuming single node and all memory that
-+ * has been added dynamically that would be
-+ * onlined here is in HIGHMEM
++ * This routine handles page faults.  It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ *
++ * error_code:
++ *	bit 0 == 0 means no page found, 1 means protection fault
++ *	bit 1 == 0 means read, 1 means write
++ *	bit 2 == 0 means kernel, 1 means user-mode
++ *	bit 3 == 1 means use of reserved bit detected
++ *	bit 4 == 1 means fault was an instruction fetch
 + */
-+void online_page(struct page *page)
++fastcall void __kprobes do_page_fault(struct pt_regs *regs,
++				      unsigned long error_code)
 +{
-+	ClearPageReserved(page);
-+	add_one_highpage_hotplug(page, page_to_pfn(page));
-+}
++	struct task_struct *tsk;
++	struct mm_struct *mm;
++	struct vm_area_struct * vma;
++	unsigned long address;
++	int write, si_code;
 +
++	/* get the address */
++	address = read_cr2();
 +
-+#ifdef CONFIG_NUMA
-+extern void set_highmem_pages_init(int);
-+#else
-+static void __init set_highmem_pages_init(int bad_ppro)
-+{
-+	int pfn;
-+	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
-+		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
-+	totalram_pages += totalhigh_pages;
-+}
-+#endif /* CONFIG_FLATMEM */
++	/* Set the "privileged fault" bit to something sane. */
++	error_code &= ~4;
++	error_code |= (regs->xcs & 2) << 1;
++	if (regs->eflags & X86_EFLAGS_VM)
++		error_code |= 4;
 +
-+#else
-+#define kmap_init() do { } while (0)
-+#define permanent_kmaps_init(pgd_base) do { } while (0)
-+#define set_highmem_pages_init(bad_ppro) do { } while (0)
-+#endif /* CONFIG_HIGHMEM */
++	tsk = current;
 +
-+unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
-+EXPORT_SYMBOL(__PAGE_KERNEL);
-+unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
++	si_code = SEGV_MAPERR;
 +
-+#ifdef CONFIG_NUMA
-+extern void __init remap_numa_kva(void);
-+#else
-+#define remap_numa_kva() do {} while (0)
++	/*
++	 * We fault-in kernel-space virtual memory on-demand. The
++	 * 'reference' page table is init_mm.pgd.
++	 *
++	 * NOTE! We MUST NOT take any locks for this case. We may
++	 * be in an interrupt or a critical region, and should
++	 * only copy the information from the master page table,
++	 * nothing more.
++	 *
++	 * This verifies that the fault happens in kernel space
++	 * (error_code & 4) == 0, and that the fault was not a
++	 * protection error (error_code & 9) == 0.
++	 */
++	if (unlikely(address >= TASK_SIZE)) {
++#ifdef CONFIG_XEN
++		/* Faults in hypervisor area can never be patched up. */
++		if (address >= HYPERVISOR_VIRT_START)
++			goto bad_area_nosemaphore;
 +#endif
++		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
++			return;
++		/* Can take a spurious fault if mapping changes R/O -> R/W. */
++		if (spurious_fault(regs, address, error_code))
++			return;
++		if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++						SIGSEGV) == NOTIFY_STOP)
++			return;
++		/*
++		 * Don't take the mm semaphore here. If we fixup a prefetch
++		 * fault we could otherwise deadlock.
++		 */
++		goto bad_area_nosemaphore;
++	}
 +
-+pgd_t *swapper_pg_dir;
++	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++					SIGSEGV) == NOTIFY_STOP)
++		return;
 +
-+static void __init pagetable_init (void)
-+{
-+	unsigned long vaddr;
-+	pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
++	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
++	   fault has been handled. */
++	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
++		local_irq_enable();
 +
-+	swapper_pg_dir = pgd_base;
-+	init_mm.pgd    = pgd_base;
++	mm = tsk->mm;
 +
-+	/* Enable PSE if available */
-+	if (cpu_has_pse) {
-+		set_in_cr4(X86_CR4_PSE);
-+	}
++	/*
++	 * If we're in an interrupt, have no user context or are running in an
++	 * atomic region then we must not take the fault..
++	 */
++	if (in_atomic() || !mm)
++		goto bad_area_nosemaphore;
 +
-+	/* Enable PGE if available */
-+	if (cpu_has_pge) {
-+		set_in_cr4(X86_CR4_PGE);
-+		__PAGE_KERNEL |= _PAGE_GLOBAL;
-+		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
++	/* When running in the kernel we expect faults to occur only to
++	 * addresses in user space.  All other faults represent errors in the
++	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
++	 * erroneous fault occurring in a code path which already holds mmap_sem
++	 * we will deadlock attempting to validate the fault against the
++	 * address space.  Luckily the kernel only validly references user
++	 * space from well defined areas of code, which are listed in the
++	 * exceptions table.
++	 *
++	 * As the vast majority of faults will be valid we will only perform
++	 * the source reference check when there is a possibility of a deadlock.
++	 * Attempt to lock the address space, if we cannot we then validate the
++	 * source.  If this is invalid we can skip the address space check,
++	 * thus avoiding the deadlock.
++	 */
++	if (!down_read_trylock(&mm->mmap_sem)) {
++		if ((error_code & 4) == 0 &&
++		    !search_exception_tables(regs->eip))
++			goto bad_area_nosemaphore;
++		down_read(&mm->mmap_sem);
 +	}
 +
-+	kernel_physical_mapping_init(pgd_base);
-+	remap_numa_kva();
++	vma = find_vma(mm, address);
++	if (!vma)
++		goto bad_area;
++	if (vma->vm_start <= address)
++		goto good_area;
++	if (!(vma->vm_flags & VM_GROWSDOWN))
++		goto bad_area;
++	if (error_code & 4) {
++		/*
++		 * Accessing the stack below %esp is always a bug.
++		 * The large cushion allows instructions like enter
++		 * and pusha to work.  ("enter $65535,$31" pushes
++		 * 32 pointers and then decrements %esp by 65535.)
++		 */
++		if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
++			goto bad_area;
++	}
++	if (expand_stack(vma, address))
++		goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++	si_code = SEGV_ACCERR;
++	write = 0;
++	switch (error_code & 3) {
++		default:	/* 3: write, present */
++				/* fall through */
++		case 2:		/* write, not present */
++			if (!(vma->vm_flags & VM_WRITE))
++				goto bad_area;
++			write++;
++			break;
++		case 1:		/* read, present */
++			goto bad_area;
++		case 0:		/* read, not present */
++			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
++				goto bad_area;
++	}
 +
++ survive:
 +	/*
-+	 * Fixed mappings, only the page table structure has to be
-+	 * created - mappings will be set by set_fixmap():
++	 * If for any reason at all we couldn't handle the fault,
++	 * make sure we exit gracefully rather than endlessly redo
++	 * the fault.
 +	 */
-+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-+	page_table_range_init(vaddr, 0, pgd_base);
++	switch (handle_mm_fault(mm, vma, address, write)) {
++		case VM_FAULT_MINOR:
++			tsk->min_flt++;
++			break;
++		case VM_FAULT_MAJOR:
++			tsk->maj_flt++;
++			break;
++		case VM_FAULT_SIGBUS:
++			goto do_sigbus;
++		case VM_FAULT_OOM:
++			goto out_of_memory;
++		default:
++			BUG();
++	}
 +
-+	permanent_kmaps_init(pgd_base);
-+}
++	/*
++	 * Did it hit the DOS screen memory VA from vm86 mode?
++	 */
++	if (regs->eflags & VM_MASK) {
++		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
++		if (bit < 32)
++			tsk->thread.screen_bitmap |= 1 << bit;
++	}
++	up_read(&mm->mmap_sem);
++	return;
 +
-+#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
 +/*
-+ * Swap suspend & friends need this for resume because things like the intel-agp
-+ * driver might have split up a kernel 4MB mapping.
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
 + */
-+char __nosavedata swsusp_pg_dir[PAGE_SIZE]
-+	__attribute__ ((aligned (PAGE_SIZE)));
-+
-+static inline void save_pg_dir(void)
-+{
-+	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
-+}
-+#else
-+static inline void save_pg_dir(void)
-+{
-+}
-+#endif
++bad_area:
++	up_read(&mm->mmap_sem);
 +
-+void zap_low_mappings (void)
-+{
-+	int i;
++bad_area_nosemaphore:
++	/* User mode accesses just cause a SIGSEGV */
++	if (error_code & 4) {
++		/* 
++		 * Valid to do another page fault here because this one came 
++		 * from user space.
++		 */
++		if (is_prefetch(regs, address, error_code))
++			return;
 +
-+	save_pg_dir();
++		tsk->thread.cr2 = address;
++		/* Kernel addresses are always protection faults */
++		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++		tsk->thread.trap_no = 14;
++		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
++		return;
++	}
 +
++#ifdef CONFIG_X86_F00F_BUG
 +	/*
-+	 * Zap initial low-memory mappings.
-+	 *
-+	 * Note that "pgd_clear()" doesn't do it for
-+	 * us, because pgd_clear() is a no-op on i386.
++	 * Pentium F0 0F C7 C8 bug workaround.
 +	 */
-+	for (i = 0; i < USER_PTRS_PER_PGD; i++)
-+#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
-+		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
-+#else
-+		set_pgd(swapper_pg_dir+i, __pgd(0));
++	if (boot_cpu_data.f00f_bug) {
++		unsigned long nr;
++		
++		nr = (address - idt_descr.address) >> 3;
++
++		if (nr == 6) {
++			do_invalid_op(regs, 0);
++			return;
++		}
++	}
 +#endif
-+	flush_tlb_all();
-+}
 +
-+static int disable_nx __initdata = 0;
-+u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
-+EXPORT_SYMBOL(__supported_pte_mask);
++no_context:
++	/* Are we prepared to handle this kernel fault?  */
++	if (fixup_exception(regs))
++		return;
++
++	/* 
++	 * Valid to do another page fault here, because if this fault
++	 * had been triggered by is_prefetch fixup_exception would have 
++	 * handled it.
++	 */
++ 	if (is_prefetch(regs, address, error_code))
++ 		return;
 +
 +/*
-+ * noexec = on|off
-+ *
-+ * Control non executable mappings.
-+ *
-+ * on      Enable
-+ * off     Disable
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
 + */
-+void __init noexec_setup(const char *str)
-+{
-+	if (!strncmp(str, "on",2) && cpu_has_nx) {
-+		__supported_pte_mask |= _PAGE_NX;
-+		disable_nx = 0;
-+	} else if (!strncmp(str,"off",3)) {
-+		disable_nx = 1;
-+		__supported_pte_mask &= ~_PAGE_NX;
++
++	bust_spinlocks(1);
++
++	if (oops_may_print()) {
++	#ifdef CONFIG_X86_PAE
++		if (error_code & 16) {
++			pte_t *pte = lookup_address(address);
++
++			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
++				printk(KERN_CRIT "kernel tried to execute "
++					"NX-protected page - exploit attempt? "
++					"(uid: %d)\n", current->uid);
++		}
++	#endif
++		if (address < PAGE_SIZE)
++			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
++					"pointer dereference");
++		else
++			printk(KERN_ALERT "BUG: unable to handle kernel paging"
++					" request");
++		printk(" at virtual address %08lx\n",address);
++		printk(KERN_ALERT " printing eip:\n");
++		printk("%08lx\n", regs->eip);
++		dump_fault_path(address);
 +	}
-+}
++	tsk->thread.cr2 = address;
++	tsk->thread.trap_no = 14;
++	tsk->thread.error_code = error_code;
++	die("Oops", regs, error_code);
++	bust_spinlocks(0);
++	do_exit(SIGKILL);
 +
-+int nx_enabled = 0;
-+#ifdef CONFIG_X86_PAE
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++	up_read(&mm->mmap_sem);
++	if (is_init(tsk)) {
++		yield();
++		down_read(&mm->mmap_sem);
++		goto survive;
++	}
++	printk("VM: killing process %s\n", tsk->comm);
++	if (error_code & 4)
++		do_exit(SIGKILL);
++	goto no_context;
 +
-+static void __init set_nx(void)
++do_sigbus:
++	up_read(&mm->mmap_sem);
++
++	/* Kernel mode? Handle exceptions or die */
++	if (!(error_code & 4))
++		goto no_context;
++
++	/* User space => ok to do another page fault */
++	if (is_prefetch(regs, address, error_code))
++		return;
++
++	tsk->thread.cr2 = address;
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = 14;
++	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
++}
++
++#ifndef CONFIG_X86_PAE
++void vmalloc_sync_all(void)
 +{
-+	unsigned int v[4], l, h;
++	/*
++	 * Note that races in the updates of insync and start aren't
++	 * problematic: insync can only get set bits added, and updates to
++	 * start are only improving performance (without affecting correctness
++	 * if undone).
++	 */
++	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
++	static unsigned long start = TASK_SIZE;
++	unsigned long address;
 +
-+	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
-+		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
-+		if ((v[3] & (1 << 20)) && !disable_nx) {
-+			rdmsr(MSR_EFER, l, h);
-+			l |= EFER_NX;
-+			wrmsr(MSR_EFER, l, h);
-+			nx_enabled = 1;
-+			__supported_pte_mask |= _PAGE_NX;
++	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
++	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
++		if (!test_bit(pgd_index(address), insync)) {
++			unsigned long flags;
++			struct page *page;
++
++			spin_lock_irqsave(&pgd_lock, flags);
++			for (page = pgd_list; page; page =
++					(struct page *)page->index)
++				if (!vmalloc_sync_one(page_address(page),
++								address)) {
++					BUG_ON(page != pgd_list);
++					break;
++				}
++			spin_unlock_irqrestore(&pgd_lock, flags);
++			if (!page)
++				set_bit(pgd_index(address), insync);
 +		}
++		if (address == start && test_bit(pgd_index(address), insync))
++			start = address + PGDIR_SIZE;
 +	}
 +}
++#endif
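
For reference while reading fault-xen.c: the error_code bits documented above
do_page_fault() (present, write, user, reserved bit, instruction fetch)
decode as in this small user-space sketch; the sample values in main() are
illustrative only:

#include <stdio.h>

static void decode_fault(unsigned long error_code)
{
	printf("%s, %s, %s%s%s\n",
	       (error_code & 1)  ? "protection fault" : "no page found",
	       (error_code & 2)  ? "write"            : "read",
	       (error_code & 4)  ? "user mode"        : "kernel mode",
	       (error_code & 8)  ? ", reserved bit"   : "",
	       (error_code & 16) ? ", instruction fetch" : "");
}

int main(void)
{
	decode_fault(0x02); /* kernel write to a missing page        */
	decode_fault(0x07); /* user write hitting a protection fault */
	decode_fault(0x14); /* user instruction fetch, missing page  */
	return 0;
}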
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/mm/highmem-xen.c b/arch/i386/mm/highmem-xen.c
+--- a/arch/i386/mm/highmem-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/mm/highmem-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,117 @@
++#include <linux/highmem.h>
++#include <linux/module.h>
++
++void *kmap(struct page *page)
++{
++	might_sleep();
++	if (!PageHighMem(page))
++		return page_address(page);
++	return kmap_high(page);
++}
++
++void kunmap(struct page *page)
++{
++	if (in_interrupt())
++		BUG();
++	if (!PageHighMem(page))
++		return;
++	kunmap_high(page);
++}
 +
 +/*
-+ * Enables/disables executability of a given kernel page and
-+ * returns the previous setting.
++ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
++ * no global lock is needed and because the kmap code must perform a global TLB
++ * invalidation when the kmap pool wraps.
++ *
++ * However when holding an atomic kmap it is not legal to sleep, so atomic
++ * kmaps are appropriate for short, tight code paths only.
 + */
-+int __init set_kernel_exec(unsigned long vaddr, int enable)
++static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
 +{
-+	pte_t *pte;
-+	int ret = 1;
++	enum fixed_addresses idx;
++	unsigned long vaddr;
 +
-+	if (!nx_enabled)
-+		goto out;
++	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
++	pagefault_disable();
++	if (!PageHighMem(page))
++		return page_address(page);
 +
-+	pte = lookup_address(vaddr);
-+	BUG_ON(!pte);
++	idx = type + KM_TYPE_NR*smp_processor_id();
++	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++	if (!pte_none(*(kmap_pte-idx)))
++		BUG();
++	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
 +
-+	if (!pte_exec_kernel(*pte))
-+		ret = 0;
++	return (void*) vaddr;
++}
 +
-+	if (enable)
-+		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
-+	else
-+		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
-+	__flush_tlb_all();
-+out:
-+	return ret;
++void *kmap_atomic(struct page *page, enum km_type type)
++{
++	return __kmap_atomic(page, type, kmap_prot);
 +}
 +
-+#endif
++/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
++void *kmap_atomic_pte(struct page *page, enum km_type type)
++{
++	return __kmap_atomic(page, type, PAGE_KERNEL_RO);
++}
 +
-+/*
-+ * paging_init() sets up the page tables - note that the first 8MB are
-+ * already mapped by head.S.
-+ *
-+ * This routines also unmaps the page at virtual kernel address 0, so
-+ * that we can trap those pesky NULL-reference errors in the kernel.
-+ */
-+void __init paging_init(void)
++void kunmap_atomic(void *kvaddr, enum km_type type)
 +{
-+	int i;
++	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
++	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 +
-+#ifdef CONFIG_X86_PAE
-+	set_nx();
-+	if (nx_enabled)
-+		printk("NX (Execute Disable) protection: active\n");
++	/*
++	 * Force other mappings to Oops if they'll try to access this pte
++	 * without first remapping it.  Keeping stale mappings around is also a
++	 * bad idea, in case the page changes cacheability attributes or becomes
++	 * a protected page in a hypervisor.
++	 */
++	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
++		kpte_clear_flush(kmap_pte-idx, vaddr);
++		__flush_tlb_one(vaddr);
++	} else {
++#ifdef CONFIG_DEBUG_HIGHMEM
++		BUG_ON(vaddr < PAGE_OFFSET);
++		BUG_ON(vaddr >= (unsigned long)high_memory);
 +#endif
++	}
 +
-+	pagetable_init();
++	pagefault_enable();
++}
 +
-+#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
-+	/*
-+	 * We will bail out later - printk doesn't work right now so
-+	 * the user would just see a hanging kernel.
-+	 * when running as xen domain we are already in PAE mode at
-+	 * this point.
-+	 */
-+	if (cpu_has_pae)
-+		set_in_cr4(X86_CR4_PAE);
-+#endif
-+	__flush_tlb_all();
++/* This is the same as kmap_atomic() but can map memory that doesn't
++ * have a struct page associated with it.
++ */
++void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
++{
++	enum fixed_addresses idx;
++	unsigned long vaddr;
 +
-+	kmap_init();
++	pagefault_disable();
 +
-+	/* Switch to the real shared_info page, and clear the
-+	 * dummy page. */
-+	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-+	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-+	memset(empty_zero_page, 0, sizeof(empty_zero_page));
++	idx = type + KM_TYPE_NR*smp_processor_id();
++	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
 +
-+	/* Setup mapping of lower 1st MB */
-+	for (i = 0; i < NR_FIX_ISAMAPS; i++)
-+		if (is_initial_xendomain())
-+			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
-+		else
-+			__set_fixmap(FIX_ISAMAP_BEGIN - i,
-+				     virt_to_machine(empty_zero_page),
-+				     PAGE_KERNEL_RO);
++	return (void*) vaddr;
 +}
 +
-+/*
-+ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
-+ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
-+ * used to involve black magic jumps to work around some nasty CPU bugs,
-+ * but fortunately the switch to using exceptions got rid of all that.
-+ */
-+
-+static void __init test_wp_bit(void)
++struct page *kmap_atomic_to_page(void *ptr)
 +{
-+	printk("Checking if this processor honours the WP bit even in supervisor mode... ");
++	unsigned long idx, vaddr = (unsigned long)ptr;
++	pte_t *pte;
 +
-+	/* Any page-aligned address will do, the test is non-destructive */
-+	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
-+	boot_cpu_data.wp_works_ok = do_test_wp_bit();
-+	clear_fixmap(FIX_WP_TEST);
++	if (vaddr < FIXADDR_START)
++		return virt_to_page(ptr);
 +
-+	if (!boot_cpu_data.wp_works_ok) {
-+		printk("No.\n");
-+#ifdef CONFIG_X86_WP_WORKS_OK
-+		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
-+#endif
-+	} else {
-+		printk("Ok.\n");
-+	}
++	idx = virt_to_fix(vaddr);
++	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
++	return pte_page(*pte);
 +}
 +
-+static void __init set_max_mapnr_init(void)
-+{
-+#ifdef CONFIG_HIGHMEM
-+	num_physpages = highend_pfn;
++EXPORT_SYMBOL(kmap);
++EXPORT_SYMBOL(kunmap);
++EXPORT_SYMBOL(kmap_atomic);
++EXPORT_SYMBOL(kunmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_to_page);
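
All of the kmap_atomic() variants above derive their mapping slot as
idx = type + KM_TYPE_NR * smp_processor_id(), so each (type, CPU) pair owns a
private fixmap page and no lock is needed; that is also why sleeping while
the mapping is held is forbidden. A rough model of the slot arithmetic
follows: all constants are made up, and the downward-growing fixmap layout is
simplified to a __fix_to_virt()-style subtraction.

#include <stdio.h>

#define KM_TYPE_NR	14           /* slots per CPU (illustrative)   */
#define FIX_KMAP_BEGIN	8            /* first kmap slot (illustrative) */
#define FIXADDR_TOP	0xfffff000UL /* hypothetical fixmap top        */
#define PAGE_SHIFT	12

static unsigned long slot_vaddr(int type, int cpu)
{
	int idx = type + KM_TYPE_NR * cpu; /* private slot per (type, cpu) */
	return FIXADDR_TOP - ((unsigned long)(FIX_KMAP_BEGIN + idx) << PAGE_SHIFT);
}

int main(void)
{
	/* Two CPUs using the same type never share a virtual page. */
	printf("cpu0/type1 -> %#lx\n", slot_vaddr(1, 0));
	printf("cpu1/type1 -> %#lx\n", slot_vaddr(1, 1));
	return 0;
}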
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/mm/hypervisor.c b/arch/i386/mm/hypervisor.c
+--- a/arch/i386/mm/hypervisor.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/mm/hypervisor.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,457 @@
++/******************************************************************************
++ * mm/hypervisor.c
++ * 
++ * Update page tables via the hypervisor.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/features.h>
++#include <xen/interface/memory.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <asm/tlbflush.h>
++
++#ifdef CONFIG_X86_64
++#define pmd_val_ma(v) (v).pmd
 +#else
-+	num_physpages = max_low_pfn;
++#ifdef CONFIG_X86_PAE
++# define pmd_val_ma(v) ((v).pmd)
++# define pud_val_ma(v) ((v).pgd.pgd)
++#else
++# define pmd_val_ma(v) ((v).pud.pgd.pgd)
 +#endif
-+#ifdef CONFIG_FLATMEM
-+	max_mapnr = num_physpages;
 +#endif
-+}
-+
-+static struct kcore_list kcore_mem, kcore_vmalloc; 
 +
-+void __init mem_init(void)
++void xen_l1_entry_update(pte_t *ptr, pte_t val)
 +{
-+	extern int ppro_with_ram_bug(void);
-+	int codesize, reservedpages, datasize, initsize;
-+	int tmp;
-+	int bad_ppro;
-+	unsigned long pfn;
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = pte_val_ma(val);
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
 +
-+	contiguous_bitmap = alloc_bootmem_low_pages(
-+		(max_low_pfn + 2*BITS_PER_LONG) >> 3);
-+	BUG_ON(!contiguous_bitmap);
-+	memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
++{
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = pmd_val_ma(val);
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
 +
-+#if defined(CONFIG_SWIOTLB)
-+	swiotlb_init();	
++#ifdef CONFIG_X86_PAE
++void xen_l3_entry_update(pud_t *ptr, pud_t val)
++{
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = pud_val_ma(val);
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
 +#endif
 +
-+#ifdef CONFIG_FLATMEM
-+	if (!mem_map)
-+		BUG();
-+#endif
-+	
-+	bad_ppro = ppro_with_ram_bug();
++#ifdef CONFIG_X86_64
++void xen_l3_entry_update(pud_t *ptr, pud_t val)
++{
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = val.pud;
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
 +
-+#ifdef CONFIG_HIGHMEM
-+	/* check that fixmap and pkmap do not overlap */
-+	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
-+		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
-+		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
-+				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
-+		BUG();
-+	}
-+#endif
-+ 
-+	set_max_mapnr_init();
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
++{
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = val.pgd;
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif /* CONFIG_X86_64 */
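
The four xen_lN_entry_update() helpers above share one shape: fill a single
mmu_update_t with the machine address of the page-table slot and the raw new
entry, issue a one-element HYPERVISOR_mmu_update(), and treat failure as
fatal. A stand-alone sketch of that shape; virt_to_machine() and the
hypercall are stubs here so it compiles and runs in isolation:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { uint64_t ptr, val; } mmu_update_t;  /* stub layout */

    static uint64_t virt_to_machine(void *p)             /* stub */
    {
        return (uint64_t)(uintptr_t)p;
    }

    static int HYPERVISOR_mmu_update(mmu_update_t *req, int count) /* stub */
    {
        printf("mmu_update: ptr=%#llx val=%#llx (%d request)\n",
               (unsigned long long)req->ptr,
               (unsigned long long)req->val, count);
        return 0;
    }

    /* The common shape of xen_l1..l4_entry_update(). */
    static void entry_update(uint64_t *slot, uint64_t raw_entry)
    {
        mmu_update_t u;

        u.ptr = virt_to_machine(slot);  /* machine address of the slot */
        u.val = raw_entry;              /* new entry, machine format   */
        if (HYPERVISOR_mmu_update(&u, 1) < 0)
            abort();                    /* the kernel BUG_ON()s here   */
    }

    int main(void)
    {
        uint64_t slot = 0;
        entry_update(&slot, 0x1000 | 0x63);  /* frame | P/RW/A/D bits */
        return 0;
    }
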
 +
-+#ifdef CONFIG_HIGHMEM
-+	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
-+#else
-+	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
-+#endif
-+	printk("vmalloc area: %lx-%lx, maxmem %lx\n",
-+	       VMALLOC_START,VMALLOC_END,MAXMEM);
-+	BUG_ON(VMALLOC_START > VMALLOC_END);
-+	
-+	/* this will put all low memory onto the freelists */
-+	totalram_pages += free_all_bootmem();
-+	/* XEN: init and count low-mem pages outside initial allocation. */
-+	for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
-+		ClearPageReserved(&mem_map[pfn]);
-+		init_page_count(&mem_map[pfn]);
-+		totalram_pages++;
++void xen_machphys_update(unsigned long mfn, unsigned long pfn)
++{
++	mmu_update_t u;
++	if (xen_feature(XENFEAT_auto_translated_physmap)) {
++		BUG_ON(pfn != mfn);
++		return;
 +	}
++	u.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
++	u.val = pfn;
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
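
xen_machphys_update() above encodes its request in the pointer field itself:
the MFN occupies the address bits, and the command is OR'd into the low bits
that page alignment leaves free. A runnable encode/decode check (the
MMU_MACHPHYS_UPDATE value below is illustrative):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT           12
    #define MMU_MACHPHYS_UPDATE  1ULL   /* illustrative command value */

    int main(void)
    {
        unsigned long long mfn = 0x12345;
        unsigned long long ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;

        /* Page alignment keeps the low 12 bits free for the command. */
        assert(ptr >> PAGE_SHIFT == mfn);
        assert((ptr & ((1ULL << PAGE_SHIFT) - 1)) == MMU_MACHPHYS_UPDATE);
        printf("mfn %#llx -> ptr %#llx\n", mfn, ptr);
        return 0;
    }
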
 +
-+	reservedpages = 0;
-+	for (tmp = 0; tmp < max_low_pfn; tmp++)
-+		/*
-+		 * Only count reserved RAM pages
-+		 */
-+		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
-+			reservedpages++;
-+
-+	set_highmem_pages_init(bad_ppro);
-+
-+	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-+	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-+	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
++void xen_pt_switch(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_NEW_BASEPTR;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
 +
-+	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
-+	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
-+		   VMALLOC_END-VMALLOC_START);
++void xen_new_user_pt(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_NEW_USER_BASEPTR;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
 +
-+	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
-+		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-+		num_physpages << (PAGE_SHIFT-10),
-+		codesize >> 10,
-+		reservedpages << (PAGE_SHIFT-10),
-+		datasize >> 10,
-+		initsize >> 10,
-+		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
-+	       );
++void xen_tlb_flush(void)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_tlb_flush);
 +
-+#ifdef CONFIG_X86_PAE
-+	if (!cpu_has_pae)
-+		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
-+#endif
-+	if (boot_cpu_data.wp_works_ok < 0)
-+		test_wp_bit();
++void xen_invlpg(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_INVLPG_LOCAL;
++	op.arg1.linear_addr = ptr & PAGE_MASK;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_invlpg);
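
The TLB helpers above are thin wrappers over HYPERVISOR_mmuext_op(): pick a
command, fill arg1 (and arg2 where needed), fire a one-element op, BUG on
failure. A stubbed sketch of one such wrapper; the struct layout, command
number, and hypercall below are placeholders, not Xen's real definitions:

    #include <stdio.h>
    #include <stdlib.h>

    struct mmuext_op {                       /* stub mirror of the ABI */
        unsigned int cmd;
        union { unsigned long mfn; unsigned long linear_addr; } arg1;
    };

    #define MMUEXT_INVLPG_LOCAL  7           /* illustrative value */
    #define PAGE_MASK            (~0xfffUL)

    static int HYPERVISOR_mmuext_op(struct mmuext_op *op, int count) /* stub */
    {
        printf("mmuext_op: cmd=%u arg1=%#lx (%d op)\n",
               op->cmd, op->arg1.linear_addr, count);
        return 0;
    }

    static void invlpg(unsigned long ptr)    /* shape of xen_invlpg() */
    {
        struct mmuext_op op;

        op.cmd = MMUEXT_INVLPG_LOCAL;
        op.arg1.linear_addr = ptr & PAGE_MASK;  /* page-align first */
        if (HYPERVISOR_mmuext_op(&op, 1) < 0)
            abort();                            /* BUG_ON() upstream */
    }

    int main(void)
    {
        invlpg(0xc0001234UL);
        return 0;
    }
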
 +
-+	/*
-+	 * Subtle. SMP is doing it's boot stuff late (because it has to
-+	 * fork idle threads) - but it also needs low mappings for the
-+	 * protected-mode entry to work. We zap these entries only after
-+	 * the WP-bit has been tested.
-+	 */
-+#ifndef CONFIG_SMP
-+	zap_low_mappings();
-+#endif
++#ifdef CONFIG_SMP
 +
-+	set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
++void xen_tlb_flush_all(void)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_TLB_FLUSH_ALL;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 +}
 +
-+/*
-+ * this is for the non-NUMA, single node SMP system case.
-+ * Specifically, in the case of x86, we will always add
-+ * memory to the highmem for now.
-+ */
-+#ifdef CONFIG_MEMORY_HOTPLUG
-+#ifndef CONFIG_NEED_MULTIPLE_NODES
-+int arch_add_memory(u64 start, u64 size)
++void xen_tlb_flush_mask(cpumask_t *mask)
 +{
-+	struct pglist_data *pgdata = &contig_page_data;
-+	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
-+	unsigned long start_pfn = start >> PAGE_SHIFT;
-+	unsigned long nr_pages = size >> PAGE_SHIFT;
++	struct mmuext_op op;
++	if ( cpus_empty(*mask) )
++		return;
++	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
++	op.arg2.vcpumask = mask->bits;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
 +
-+	return __add_pages(zone, start_pfn, nr_pages);
++void xen_invlpg_all(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_INVLPG_ALL;
++	op.arg1.linear_addr = ptr & PAGE_MASK;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 +}
 +
-+int remove_memory(u64 start, u64 size)
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
 +{
-+	return -EINVAL;
++	struct mmuext_op op;
++	if ( cpus_empty(*mask) )
++		return;
++	op.cmd = MMUEXT_INVLPG_MULTI;
++	op.arg1.linear_addr = ptr & PAGE_MASK;
++	op.arg2.vcpumask    = mask->bits;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 +}
-+#endif
-+#endif
 +
-+kmem_cache_t *pgd_cache;
-+kmem_cache_t *pmd_cache;
++#endif /* CONFIG_SMP */
 +
-+void __init pgtable_cache_init(void)
++void xen_pgd_pin(unsigned long ptr)
 +{
-+	if (PTRS_PER_PMD > 1) {
-+		pmd_cache = kmem_cache_create("pmd",
-+					PTRS_PER_PMD*sizeof(pmd_t),
-+					PTRS_PER_PMD*sizeof(pmd_t),
-+					0,
-+					pmd_ctor,
-+					NULL);
-+		if (!pmd_cache)
-+			panic("pgtable_cache_init(): cannot create pmd cache");
-+	}
-+	pgd_cache = kmem_cache_create("pgd",
-+#ifndef CONFIG_XEN
-+				PTRS_PER_PGD*sizeof(pgd_t),
-+				PTRS_PER_PGD*sizeof(pgd_t),
++	struct mmuext_op op;
++#ifdef CONFIG_X86_64
++	op.cmd = MMUEXT_PIN_L4_TABLE;
++#elif defined(CONFIG_X86_PAE)
++	op.cmd = MMUEXT_PIN_L3_TABLE;
 +#else
-+				PAGE_SIZE,
-+				PAGE_SIZE,
++	op.cmd = MMUEXT_PIN_L2_TABLE;
 +#endif
-+				0,
-+				pgd_ctor,
-+				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
-+	if (!pgd_cache)
-+		panic("pgtable_cache_init(): Cannot create pgd cache");
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pgd_unpin(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_UNPIN_TABLE;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_set_ldt(unsigned long ptr, unsigned long len)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_SET_LDT;
++	op.arg1.linear_addr = ptr;
++	op.arg2.nr_ents     = len;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 +}
 +
 +/*
-+ * This function cannot be __init, since exceptions don't work in that
-+ * section.  Put this after the callers, so that it cannot be inlined.
++ * Bitmap is indexed by page number. If bit is set, the page is part of a
++ * xen_create_contiguous_region() area of memory.
 + */
-+static int noinline do_test_wp_bit(void)
++unsigned long *contiguous_bitmap;
++
++static void contiguous_bitmap_set(
++	unsigned long first_page, unsigned long nr_pages)
 +{
-+	char tmp_reg;
-+	int flag;
++	unsigned long start_off, end_off, curr_idx, end_idx;
 +
-+	__asm__ __volatile__(
-+		"	movb %0,%1	\n"
-+		"1:	movb %1,%0	\n"
-+		"	xorl %2,%2	\n"
-+		"2:			\n"
-+		".section __ex_table,\"a\"\n"
-+		"	.align 4	\n"
-+		"	.long 1b,2b	\n"
-+		".previous		\n"
-+		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
-+		 "=q" (tmp_reg),
-+		 "=r" (flag)
-+		:"2" (1)
-+		:"memory");
-+	
-+	return flag;
-+}
++	curr_idx  = first_page / BITS_PER_LONG;
++	start_off = first_page & (BITS_PER_LONG-1);
++	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
++	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
 +
-+#ifdef CONFIG_DEBUG_RODATA
++	if (curr_idx == end_idx) {
++		contiguous_bitmap[curr_idx] |=
++			((1UL<<end_off)-1) & -(1UL<<start_off);
++	} else {
++		contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
++		while ( ++curr_idx < end_idx )
++			contiguous_bitmap[curr_idx] = ~0UL;
++		contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
++	}
++}
 +
-+void mark_rodata_ro(void)
++static void contiguous_bitmap_clear(
++	unsigned long first_page, unsigned long nr_pages)
 +{
-+	unsigned long addr = (unsigned long)__start_rodata;
++	unsigned long start_off, end_off, curr_idx, end_idx;
 +
-+	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
-+		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
++	curr_idx  = first_page / BITS_PER_LONG;
++	start_off = first_page & (BITS_PER_LONG-1);
++	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
++	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
 +
-+	printk("Write protecting the kernel read-only data: %uk\n",
-+			(__end_rodata - __start_rodata) >> 10);
++	if (curr_idx == end_idx) {
++		contiguous_bitmap[curr_idx] &=
++			-(1UL<<end_off) | ((1UL<<start_off)-1);
++	} else {
++		contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
++		while ( ++curr_idx != end_idx )
++			contiguous_bitmap[curr_idx] = 0;
++		contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
++	}
++}
++
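
The mask expressions in contiguous_bitmap_set()/_clear() above reward a
second look: -(1UL<<start_off) has every bit from start_off upward set,
(1UL<<end_off)-1 has every bit below end_off set, so their AND marks a run
that begins and ends inside one word; runs that cross words get a partial
head, all-ones middle words, and a partial tail. A runnable check of both
paths (same logic as the set function, against a small local bitmap):

    #include <assert.h>
    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static unsigned long bitmap[4];

    static void range_set(unsigned long first, unsigned long nr)
    {
        unsigned long curr = first / BITS_PER_LONG;
        unsigned long start_off = first & (BITS_PER_LONG - 1);
        unsigned long end = (first + nr) / BITS_PER_LONG;
        unsigned long end_off = (first + nr) & (BITS_PER_LONG - 1);

        if (curr == end) {
            /* Run starts and ends in one word: intersect the masks. */
            bitmap[curr] |= ((1UL << end_off) - 1) & -(1UL << start_off);
        } else {
            bitmap[curr] |= -(1UL << start_off);   /* partial head      */
            while (++curr < end)
                bitmap[curr] = ~0UL;               /* full middle words */
            bitmap[curr] |= (1UL << end_off) - 1;  /* partial tail      */
        }
    }

    static int test_bit(unsigned long n)
    {
        return (bitmap[n / BITS_PER_LONG] >> (n & (BITS_PER_LONG - 1))) & 1;
    }

    int main(void)
    {
        range_set(3, 5);    /* single-word case: bits 3..7        */
        range_set(60, 10);  /* crosses at least one word boundary */
        assert(test_bit(3) && test_bit(7) && !test_bit(8));
        assert(test_bit(60) && test_bit(69) && !test_bit(70));
        printf("masks behave as expected\n");
        return 0;
    }
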
++/* Protected by balloon_lock. */
++#define MAX_CONTIG_ORDER 9 /* 2MB */
++static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
++static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
 +
++/* Ensure multi-page extents are contiguous in machine memory. */
++int xen_create_contiguous_region(
++	unsigned long vstart, unsigned int order, unsigned int address_bits)
++{
++	unsigned long *in_frames = discontig_frames, out_frame;
++	unsigned long  frame, i, flags;
++	long           rc;
++	int            success;
++	struct xen_memory_exchange exchange = {
++		.in = {
++			.nr_extents   = 1UL << order,
++			.extent_order = 0,
++			.domid        = DOMID_SELF
++		},
++		.out = {
++			.nr_extents   = 1,
++			.extent_order = order,
++			.address_bits = address_bits,
++			.domid        = DOMID_SELF
++		}
++	};
 +
 +	/*
-+	 * change_page_attr() requires a global_flush_tlb() call after it.
-+	 * We do this after the printk so that if something went wrong in the
-+	 * change, the printk gets out at least to give a better debug hint
-+	 * of who is the culprit.
++	 * Currently an auto-translated guest will not perform I/O, nor will
++	 * it require PAE page directories below 4GB. Therefore any calls to
++	 * this function are redundant and can be ignored.
 +	 */
-+	global_flush_tlb();
-+}
-+#endif
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		return 0;
 +
-+void free_init_pages(char *what, unsigned long begin, unsigned long end)
-+{
-+	unsigned long addr;
++	if (unlikely(order > MAX_CONTIG_ORDER))
++		return -ENOMEM;
 +
-+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
-+		ClearPageReserved(virt_to_page(addr));
-+		init_page_count(virt_to_page(addr));
-+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-+		free_page(addr);
-+		totalram_pages++;
-+	}
-+	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
-+}
++	set_xen_guest_handle(exchange.in.extent_start, in_frames);
++	set_xen_guest_handle(exchange.out.extent_start, &out_frame);
 +
-+void free_initmem(void)
-+{
-+	free_init_pages("unused kernel memory",
-+			(unsigned long)(&__init_begin),
-+			(unsigned long)(&__init_end));
-+}
++	scrub_pages(vstart, 1 << order);
 +
-+#ifdef CONFIG_BLK_DEV_INITRD
-+void free_initrd_mem(unsigned long start, unsigned long end)
-+{
-+	free_init_pages("initrd memory", start, end);
-+}
-+#endif
++	balloon_lock(flags);
 +
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/mm/ioremap-xen.c linux-2.6.18-xen/arch/i386/mm/ioremap-xen.c
---- linux-2.6.18.3/arch/i386/mm/ioremap-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/mm/ioremap-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,443 @@
-+/*
-+ * arch/i386/mm/ioremap.c
-+ *
-+ * Re-map IO memory to kernel address space so that we can access it.
-+ * This is needed for high PCI addresses that aren't mapped in the
-+ * 640k-1MB IO memory area on PC's
-+ *
-+ * (C) Copyright 1995 1996 Linus Torvalds
-+ */
++	/* 1. Zap current PTEs, remembering MFNs. */
++	for (i = 0; i < (1UL<<order); i++) {
++		in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
++		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++					__pte_ma(0), 0);
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++			INVALID_P2M_ENTRY);
++	}
++	if (HYPERVISOR_multicall(cr_mcl, i))
++		BUG();
 +
-+#include <linux/vmalloc.h>
-+#include <linux/init.h>
-+#include <linux/slab.h>
-+#include <linux/module.h>
-+#include <asm/io.h>
-+#include <asm/fixmap.h>
-+#include <asm/cacheflush.h>
-+#include <asm/tlbflush.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
++	/* 2. Get a new contiguous memory extent. */
++	out_frame = __pa(vstart) >> PAGE_SHIFT;
++	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++	success = (exchange.nr_exchanged == (1UL << order));
++	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++	BUG_ON(success && (rc != 0));
++	if (unlikely(rc == -ENOSYS)) {
++		/* Compatibility when XENMEM_exchange is unsupported. */
++		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++					 &exchange.in) != (1UL << order))
++			BUG();
++		success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++						&exchange.out) == 1);
++		if (!success) {
++			/* Couldn't get special memory: fall back to normal. */
++			for (i = 0; i < (1UL<<order); i++)
++				in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
++			if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++						 &exchange.in) != (1UL<<order))
++				BUG();
++		}
++	}
 +
-+#define ISA_START_ADDRESS	0x0
-+#define ISA_END_ADDRESS		0x100000
++	/* 3. Map the new extent in place of old pages. */
++	for (i = 0; i < (1UL<<order); i++) {
++		frame = success ? (out_frame + i) : in_frames[i];
++		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++					pfn_pte_ma(frame, PAGE_KERNEL), 0);
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++	}
 +
-+static int direct_remap_area_pte_fn(pte_t *pte, 
-+				    struct page *pmd_page,
-+				    unsigned long address, 
-+				    void *data)
-+{
-+	mmu_update_t **v = (mmu_update_t **)data;
++	cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++						   ? UVMF_TLB_FLUSH|UVMF_ALL
++						   : UVMF_INVLPG|UVMF_ALL;
++	if (HYPERVISOR_multicall(cr_mcl, i))
++		BUG();
 +
-+	BUG_ON(!pte_none(*pte));
++	if (success)
++		contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
++				      1UL << order);
 +
-+	(*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pmd_page)) <<
-+		     PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
-+	(*v)++;
++	balloon_unlock(flags);
 +
-+	return 0;
++	return success ? 0 : -ENOMEM;
 +}
 +
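
xen_create_contiguous_region() above is a three-phase transaction under
balloon_lock: zap the region's PTEs while recording each page's current MFN,
trade those single-page extents for one machine-contiguous extent via
XENMEM_exchange (with the decrease/populate fallback for hypervisors that
lack it), then map whichever frames resulted back into place and flush the
TLB on the final multicall entry. A compressed control-flow sketch with
stubbed hypercalls, only to expose the success/fallback bookkeeping:

    #include <stdio.h>

    /* Stubs: the exchange "succeeds" and reports all extents handled. */
    static int xenmem_exchange(int nr_in)        { return nr_in; }
    static void zap_and_record(int i)            { printf("zap %d\n", i); }
    static void map_frame(int i, int contig)
    {
        printf("map %d (%s)\n", i, contig ? "new extent" : "old frame");
    }

    /* Shape of xen_create_contiguous_region() for an order-'order' region. */
    static int create_contiguous(unsigned int order)
    {
        int i, n = 1 << order, success;

        for (i = 0; i < n; i++)     /* 1. zap PTEs, remember old MFNs */
            zap_and_record(i);

        /* 2. swap n single-page extents for one order-n extent */
        success = (xenmem_exchange(n) == n);

        for (i = 0; i < n; i++)     /* 3. remap: new frames on success,
                                     *    the recorded old ones if not */
            map_frame(i, success);

        return success ? 0 : -1;    /* the kernel returns -ENOMEM */
    }

    int main(void)
    {
        return create_contiguous(2);
    }
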
-+static int __direct_remap_pfn_range(struct mm_struct *mm,
-+				    unsigned long address, 
-+				    unsigned long mfn,
-+				    unsigned long size, 
-+				    pgprot_t prot,
-+				    domid_t  domid)
++void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
 +{
-+	int rc;
-+	unsigned long i, start_address;
-+	mmu_update_t *u, *v, *w;
++	unsigned long *out_frames = discontig_frames, in_frame;
++	unsigned long  frame, i, flags;
++	long           rc;
++	int            success;
++	struct xen_memory_exchange exchange = {
++		.in = {
++			.nr_extents   = 1,
++			.extent_order = order,
++			.domid        = DOMID_SELF
++		},
++		.out = {
++			.nr_extents   = 1UL << order,
++			.extent_order = 0,
++			.domid        = DOMID_SELF
++		}
++	};
 +
-+	u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-+	if (u == NULL)
-+		return -ENOMEM;
++	if (xen_feature(XENFEAT_auto_translated_physmap) ||
++	    !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
++		return;
 +
-+	start_address = address;
++	if (unlikely(order > MAX_CONTIG_ORDER))
++		return;
 +
-+	flush_cache_all();
++	set_xen_guest_handle(exchange.in.extent_start, &in_frame);
++	set_xen_guest_handle(exchange.out.extent_start, out_frames);
 +
-+	for (i = 0; i < size; i += PAGE_SIZE) {
-+		if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
-+			/* Flush a full batch after filling in the PTE ptrs. */
-+			rc = apply_to_page_range(mm, start_address, 
-+						 address - start_address,
-+						 direct_remap_area_pte_fn, &w);
-+			if (rc)
-+				goto out;
-+			rc = -EFAULT;
-+			if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
-+				goto out;
-+			v = w = u;
-+			start_address = address;
-+		}
++	scrub_pages(vstart, 1 << order);
 +
-+		/*
-+		 * Fill in the machine address: PTE ptr is done later by
-+		 * __direct_remap_area_pages(). 
-+		 */
-+		v->val = pte_val_ma(pfn_pte_ma(mfn, prot));
++	balloon_lock(flags);
 +
-+		mfn++;
-+		address += PAGE_SIZE; 
-+		v++;
-+	}
++	contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
 +
-+	if (v != u) {
-+		/* Final batch. */
-+		rc = apply_to_page_range(mm, start_address,
-+					 address - start_address,
-+					 direct_remap_area_pte_fn, &w);
-+		if (rc)
-+			goto out;
-+		rc = -EFAULT;
-+		if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
-+			goto out;
++	/* 1. Find start MFN of contiguous extent. */
++	in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
++
++	/* 2. Zap current PTEs. */
++	for (i = 0; i < (1UL<<order); i++) {
++		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++					__pte_ma(0), 0);
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++			INVALID_P2M_ENTRY);
++		out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
 +	}
++	if (HYPERVISOR_multicall(cr_mcl, i))
++		BUG();
 +
-+	rc = 0;
++	/* 3. Do the exchange for non-contiguous MFNs. */
++	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++	success = (exchange.nr_exchanged == 1);
++	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++	BUG_ON(success && (rc != 0));
++	if (unlikely(rc == -ENOSYS)) {
++		/* Compatibility when XENMEM_exchange is unsupported. */
++		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++					 &exchange.in) != 1)
++			BUG();
++		if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++					 &exchange.out) != (1UL << order))
++			BUG();
++		success = 1;
++	}
 +
-+ out:
-+	flush_tlb_all();
++	/* 4. Map new pages in place of old pages. */
++	for (i = 0; i < (1UL<<order); i++) {
++		frame = success ? out_frames[i] : (in_frame + i);
++		MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++					pfn_pte_ma(frame, PAGE_KERNEL), 0);
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++	}
 +
-+	free_page((unsigned long)u);
++	cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++						   ? UVMF_TLB_FLUSH|UVMF_ALL
++						   : UVMF_INVLPG|UVMF_ALL;
++	if (HYPERVISOR_multicall(cr_mcl, i))
++		BUG();
 +
-+	return rc;
++	balloon_unlock(flags);
 +}
 +
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+			   unsigned long address, 
-+			   unsigned long mfn,
-+			   unsigned long size, 
-+			   pgprot_t prot,
-+			   domid_t  domid)
++#ifdef __i386__
++int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
 +{
-+	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		return remap_pfn_range(vma, address, mfn, size, prot);
++	__u32 *lp = (__u32 *)((char *)ldt + entry * 8);
++	maddr_t mach_lp = arbitrary_virt_to_machine(lp);
++	return HYPERVISOR_update_descriptor(
++		mach_lp, (u64)entry_a | ((u64)entry_b<<32));
++}
++#endif
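
write_ldt_entry() above locates the descriptor at entry * 8 bytes and hands
the hypervisor its two 32-bit words packed into a single u64, low word
first. The packing is plain shifts and ORs; a runnable round trip (the
descriptor words below are arbitrary example values):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t entry_a = 0x0000ffff;   /* low descriptor word  */
        uint32_t entry_b = 0x00cf9a00;   /* high descriptor word */

        uint64_t packed = (uint64_t)entry_a | ((uint64_t)entry_b << 32);

        assert((uint32_t)packed == entry_a);
        assert((uint32_t)(packed >> 32) == entry_b);
        printf("descriptor = %#018llx\n", (unsigned long long)packed);
        return 0;
    }
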
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/mm/init-xen.c b/arch/i386/mm/init-xen.c
+--- a/arch/i386/mm/init-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/mm/init-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,871 @@
++/*
++ *  linux/arch/i386/mm/init.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ */
 +
-+	if (domid == DOMID_SELF)
-+		return -EINVAL;
++#include <linux/module.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/hugetlb.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/poison.h>
++#include <linux/bootmem.h>
++#include <linux/slab.h>
++#include <linux/proc_fs.h>
++#include <linux/efi.h>
++#include <linux/memory_hotplug.h>
++#include <linux/initrd.h>
++#include <linux/cpumask.h>
++#include <linux/dma-mapping.h>
++#include <linux/scatterlist.h>
 +
-+	vma->vm_flags |= VM_IO | VM_RESERVED;
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/sections.h>
++#include <asm/hypervisor.h>
++#include <asm/swiotlb.h>
 +
-+	vma->vm_mm->context.has_foreign_mappings = 1;
++extern unsigned long *contiguous_bitmap;
 +
-+	return __direct_remap_pfn_range(
-+		vma->vm_mm, address, mfn, size, prot, domid);
-+}
-+EXPORT_SYMBOL(direct_remap_pfn_range);
++unsigned int __VMALLOC_RESERVE = 128 << 20;
 +
-+int direct_kernel_remap_pfn_range(unsigned long address, 
-+				  unsigned long mfn,
-+				  unsigned long size, 
-+				  pgprot_t prot,
-+				  domid_t  domid)
-+{
-+	return __direct_remap_pfn_range(
-+		&init_mm, address, mfn, size, prot, domid);
-+}
-+EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++unsigned long highstart_pfn, highend_pfn;
 +
-+static int lookup_pte_fn(
-+	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
-+{
-+	uint64_t *ptep = (uint64_t *)data;
-+	if (ptep)
-+		*ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pmd_page)) <<
-+			 PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
-+	return 0;
-+}
++static int noinline do_test_wp_bit(void);
 +
-+int create_lookup_pte_addr(struct mm_struct *mm, 
-+			   unsigned long address,
-+			   uint64_t *ptep)
++/*
++ * Creates a middle page table and puts a pointer to it in the
++ * given global directory entry. This only returns the pgd entry
++ * in non-PAE compilation mode, since the middle layer is folded.
++ */
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
 +{
-+	return apply_to_page_range(mm, address, PAGE_SIZE,
-+				   lookup_pte_fn, ptep);
-+}
-+
-+EXPORT_SYMBOL(create_lookup_pte_addr);
++	pud_t *pud;
++	pmd_t *pmd_table;
++		
++#ifdef CONFIG_X86_PAE
++	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++	make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
++	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
++	pud = pud_offset(pgd, 0);
++	if (pmd_table != pmd_offset(pud, 0)) 
++		BUG();
++#else
++	pud = pud_offset(pgd, 0);
++	pmd_table = pmd_offset(pud, 0);
++#endif
 +
-+static int noop_fn(
-+	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
-+{
-+	return 0;
++	return pmd_table;
 +}
 +
-+int touch_pte_range(struct mm_struct *mm,
-+		    unsigned long address,
-+		    unsigned long size)
-+{
-+	return apply_to_page_range(mm, address, size, noop_fn, NULL);
-+} 
-+
-+EXPORT_SYMBOL(touch_pte_range);
-+
 +/*
-+ * Does @address reside within a non-highmem page that is local to this virtual
-+ * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
-+ * See the comment that accompanies mfn_to_local_pfn() in page.h to understand
-+ * why this works.
++ * Create a page table and place a pointer to it in a middle page
++ * directory entry.
 + */
-+static inline int is_local_lowmem(unsigned long address)
++static pte_t * __init one_page_table_init(pmd_t *pmd)
 +{
-+	extern unsigned long max_low_pfn;
-+	return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn);
++	if (pmd_none(*pmd)) {
++		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++		make_lowmem_page_readonly(page_table,
++					  XENFEAT_writable_page_tables);
++		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++		if (page_table != pte_offset_kernel(pmd, 0))
++			BUG();	
++
++		return page_table;
++	}
++	
++	return pte_offset_kernel(pmd, 0);
 +}
 +
 +/*
-+ * Generic mapping function (not visible outside):
++ * This function initializes a certain range of kernel virtual memory 
++ * with new bootmem page tables, wherever page tables are missing in
++ * the given range.
 + */
 +
 +/*
-+ * Remap an arbitrary physical address space into the kernel virtual
-+ * address space. Needed when the kernel wants to access high addresses
-+ * directly.
-+ *
-+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
-+ * have to convert them into an offset in a page-aligned mapping, but the
-+ * caller shouldn't need to know that small detail.
++ * NOTE: The pagetables are allocated contiguously in physical memory,
++ * so we can cache the location of the first one and move around
++ * without checking the pgd every time.
 + */
-+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
++static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
 +{
-+	void __iomem * addr;
-+	struct vm_struct * area;
-+	unsigned long offset, last_addr;
-+	domid_t domid = DOMID_IO;
-+
-+	/* Don't allow wraparound or zero size */
-+	last_addr = phys_addr + size - 1;
-+	if (!size || last_addr < phys_addr)
-+		return NULL;
-+
-+	/*
-+	 * Don't remap the low PCI/ISA area, it's always mapped..
-+	 */
-+	if (is_initial_xendomain() &&
-+	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
-+		return (void __iomem *) isa_bus_to_virt(phys_addr);
-+
-+	/*
-+	 * Don't allow anybody to remap normal RAM that we're using..
-+	 */
-+	if (is_local_lowmem(phys_addr)) {
-+		char *t_addr, *t_end;
-+		struct page *page;
-+
-+		t_addr = bus_to_virt(phys_addr);
-+		t_end = t_addr + (size - 1);
-+	   
-+		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-+			if(!PageReserved(page))
-+				return NULL;
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	int pgd_idx, pmd_idx;
++	unsigned long vaddr;
 +
-+		domid = DOMID_SELF;
-+	}
++	vaddr = start;
++	pgd_idx = pgd_index(vaddr);
++	pmd_idx = pmd_index(vaddr);
++	pgd = pgd_base + pgd_idx;
 +
-+	/*
-+	 * Mappings have to be page-aligned
-+	 */
-+	offset = phys_addr & ~PAGE_MASK;
-+	phys_addr &= PAGE_MASK;
-+	size = PAGE_ALIGN(last_addr+1) - phys_addr;
++	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
++		if (pgd_none(*pgd)) 
++			one_md_table_init(pgd);
++		pud = pud_offset(pgd, vaddr);
++		pmd = pmd_offset(pud, vaddr);
++		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
++			if (vaddr < HYPERVISOR_VIRT_START && pmd_none(*pmd)) 
++				one_page_table_init(pmd);
 +
-+	/*
-+	 * Ok, go for it..
-+	 */
-+	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
-+	if (!area)
-+		return NULL;
-+	area->phys_addr = phys_addr;
-+	addr = (void __iomem *) area->addr;
-+	flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
-+	if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
-+				     phys_addr>>PAGE_SHIFT,
-+				     size, __pgprot(flags), domid)) {
-+		vunmap((void __force *) addr);
-+		return NULL;
++			vaddr += PMD_SIZE;
++		}
++		pmd_idx = 0;
 +	}
-+	return (void __iomem *) (offset + (char __iomem *)addr);
 +}
-+EXPORT_SYMBOL(__ioremap);
 +
-+/**
-+ * ioremap_nocache     -   map bus memory into CPU space
-+ * @offset:    bus address of the memory
-+ * @size:      size of the resource to map
-+ *
-+ * ioremap_nocache performs a platform specific sequence of operations to
-+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
-+ * writew/writel functions and the other mmio helpers. The returned
-+ * address is not guaranteed to be usable directly as a virtual
-+ * address. 
-+ *
-+ * This version of ioremap ensures that the memory is marked uncachable
-+ * on the CPU as well as honouring existing caching rules from things like
-+ * the PCI bus. Note that there are other caches and buffers on many 
-+ * busses. In particular driver authors should read up on PCI writes
-+ *
-+ * It's useful if some control registers are in such an area and
-+ * write combining or read caching is not desirable:
-+ * 
-+ * Must be freed with iounmap.
-+ */
++static inline int is_kernel_text(unsigned long addr)
++{
++	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
++		return 1;
++	return 0;
++}
 +
-+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
++/*
++ * This maps the physical memory to kernel virtual address space, a total 
++ * of max_low_pfn pages, by creating page tables starting from address 
++ * PAGE_OFFSET.
++ */
++static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 +{
-+	unsigned long last_addr;
-+	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
-+	if (!p) 
-+		return p; 
++	unsigned long pfn;
++	pgd_t *pgd;
++	pmd_t *pmd;
++	pte_t *pte;
++	int pgd_idx, pmd_idx, pte_ofs;
 +
-+	/* Guaranteed to be > phys_addr, as per __ioremap() */
-+	last_addr = phys_addr + size - 1;
++	unsigned long max_ram_pfn = xen_start_info->nr_pages;
++	if (max_ram_pfn > max_low_pfn)
++		max_ram_pfn = max_low_pfn;
 +
-+	if (is_local_lowmem(last_addr)) { 
-+		struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
-+		unsigned long npages;
++	pgd_idx = pgd_index(PAGE_OFFSET);
++	pgd = pgd_base + pgd_idx;
++	pfn = 0;
++	pmd_idx = pmd_index(PAGE_OFFSET);
++	pte_ofs = pte_index(PAGE_OFFSET);
 +
-+		phys_addr &= PAGE_MASK;
++	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
++#ifdef CONFIG_XEN
++		/*
++		 * Native Linux does not have PAE paging enabled yet
++		 * at this point.  When running as a Xen domain we are
++		 * already in PAE mode, thus we can't simply hook an
++		 * empty pmd.  That would kill the mappings we are
++		 * currently using ...
++		 */
++		pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
++#else
++		pmd = one_md_table_init(pgd);
++#endif
++		if (pfn >= max_low_pfn)
++			continue;
++		pmd += pmd_idx;
++		for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
++			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
++			if (address >= HYPERVISOR_VIRT_START)
++				continue;
 +
-+		/* This might overflow and become zero.. */
-+		last_addr = PAGE_ALIGN(last_addr);
++			/* Map with big pages if possible, otherwise create normal page tables. */
++			if (cpu_has_pse) {
++				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
 +
-+		/* .. but that's ok, because modulo-2**n arithmetic will make
-+	 	* the page-aligned "last - first" come out right.
-+	 	*/
-+		npages = (last_addr - phys_addr) >> PAGE_SHIFT;
++				if (is_kernel_text(address) || is_kernel_text(address2))
++					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
++				else
++					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
++				pfn += PTRS_PER_PTE;
++			} else {
++				pte = one_page_table_init(pmd);
 +
-+		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
-+			iounmap(p); 
-+			p = NULL;
++				pte += pte_ofs;
++				for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
++						/* XEN: Only map initial RAM allocation. */
++						if ((pfn >= max_ram_pfn) || pte_present(*pte))
++							continue;
++						if (is_kernel_text(address))
++							set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
++						else
++							set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
++				}
++				pte_ofs = 0;
++			}
 +		}
-+		global_flush_tlb();
++		pmd_idx = 0;
 +	}
-+
-+	return p;					
 +}
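
kernel_physical_mapping_init() above maps with 4MB PSE pages where the CPU
supports them, and picks the executable large-page variant whenever any byte
of the 4MB window falls inside kernel text, hence the check at both ends of
the window, address and address2. The window arithmetic, reproduced
stand-alone with the usual non-PAE i386 constants and a made-up text range:

    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define PTRS_PER_PTE  1024              /* non-PAE i386 */
    #define PAGE_OFFSET   0xc0000000UL

    /* Hypothetical text range, standing in for _text.._init_end. */
    static unsigned long text_start = 0xc0100000UL;
    static unsigned long text_end   = 0xc03fffffUL;

    static int is_kernel_text(unsigned long addr)
    {
        return addr >= text_start && addr <= text_end;
    }

    int main(void)
    {
        unsigned long pfn = 0;      /* candidate large page at 0 */

        /* First and last byte the 4MB mapping would cover: */
        unsigned long address  = pfn * PAGE_SIZE + PAGE_OFFSET;
        unsigned long address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE
                                 + PAGE_OFFSET + PAGE_SIZE - 1;

        /* Only the tail of this window is text, which is exactly why
         * both ends must be tested before choosing the variant. */
        printf("use %s mapping for %#lx..%#lx\n",
               (is_kernel_text(address) || is_kernel_text(address2))
               ? "PAGE_KERNEL_LARGE_EXEC" : "PAGE_KERNEL_LARGE",
               address, address2);
        return 0;
    }
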
-+EXPORT_SYMBOL(ioremap_nocache);
 +
-+/**
-+ * iounmap - Free a IO remapping
-+ * @addr: virtual address from ioremap_*
-+ *
-+ * Caller must ensure there is only one unmapping for the same pointer.
-+ */
-+void iounmap(volatile void __iomem *addr)
++#ifndef CONFIG_XEN
++
++static inline int page_kills_ppro(unsigned long pagenr)
 +{
-+	struct vm_struct *p, *o;
++	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
++		return 1;
++	return 0;
++}
 +
-+	if ((void __force *)addr <= high_memory)
-+		return;
++#else
 +
-+	/*
-+	 * __ioremap special-cases the PCI/ISA range by not instantiating a
-+	 * vm_area and by simply returning an address into the kernel mapping
-+	 * of ISA space.   So handle that here.
-+	 */
-+	if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-+		return;
++#define page_kills_ppro(p)	0
 +
-+	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
++#endif
 +
-+	/* Use the vm area unlocked, assuming the caller
-+	   ensures there isn't another iounmap for the same address
-+	   in parallel. Reuse of the virtual address is prevented by
-+	   leaving it in the global lists until we're done with it.
-+	   cpa takes care of the direct mappings. */
-+	read_lock(&vmlist_lock);
-+	for (p = vmlist; p; p = p->next) {
-+		if (p->addr == addr)
-+			break;
-+	}
-+	read_unlock(&vmlist_lock);
++int page_is_ram(unsigned long pagenr)
++{
++	int i;
++	unsigned long addr, end;
 +
-+	if (!p) {
-+		printk("iounmap: bad address %p\n", addr);
-+		dump_stack();
-+		return;
++	if (efi_enabled) {
++		efi_memory_desc_t *md;
++		void *p;
++
++		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++			md = p;
++			if (!is_available_memory(md))
++				continue;
++			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
++
++			if ((pagenr >= addr) && (pagenr < end))
++				return 1;
++		}
++		return 0;
 +	}
 +
-+	/* Reset the direct mapping. Can block */
-+	if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
-+		/* p->size includes the guard page, but cpa doesn't like that */
-+		change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
-+				 (p->size - PAGE_SIZE) >> PAGE_SHIFT,
-+				 PAGE_KERNEL);
-+		global_flush_tlb();
-+	} 
++	for (i = 0; i < e820.nr_map; i++) {
 +
-+	/* Finally remove it */
-+	o = remove_vm_area((void *)addr);
-+	BUG_ON(p != o || o == NULL);
-+	kfree(p); 
++		if (e820.map[i].type != E820_RAM)	/* not usable memory */
++			continue;
++		/*
++		 *	!!!FIXME!!! Some BIOSen report areas as RAM that
++		 *	are not. Notably the 640->1Mb area. We need a sanity
++		 *	check here.
++		 */
++		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
++		if  ((pagenr >= addr) && (pagenr < end))
++			return 1;
++	}
++	return 0;
 +}
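
page_is_ram() above converts each e820 byte range to page frames
conservatively: the start rounds up via (addr + PAGE_SIZE - 1) >> PAGE_SHIFT
and the end truncates down, so a page qualifies only when it lies wholly
inside the RAM range. A quick runnable check of the rounding:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    int main(void)
    {
        /* RAM range starting and ending mid-page: 0x1800 .. 0x4800 */
        unsigned long long base = 0x1800, size = 0x3000;

        unsigned long long first = (base + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long long end   = (base + size) >> PAGE_SHIFT;

        /* Only pfns 2 and 3 are fully covered by the range. */
        assert(first == 2 && end == 4);
        printf("all-RAM pfns: [%llu, %llu)\n", first, end);
        return 0;
    }
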
-+EXPORT_SYMBOL(iounmap);
-+
-+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-+{
-+	unsigned long offset, last_addr;
-+	unsigned int nrpages;
-+	enum fixed_addresses idx;
 +
-+	/* Don't allow wraparound or zero size */
-+	last_addr = phys_addr + size - 1;
-+	if (!size || last_addr < phys_addr)
-+		return NULL;
++#ifdef CONFIG_HIGHMEM
++pte_t *kmap_pte;
++pgprot_t kmap_prot;
 +
-+	/*
-+	 * Don't remap the low PCI/ISA area, it's always mapped..
-+	 */
-+	if (is_initial_xendomain() &&
-+	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
-+		return isa_bus_to_virt(phys_addr);
++#define kmap_get_fixmap_pte(vaddr)					\
++	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
 +
-+	/*
-+	 * Mappings have to be page-aligned
-+	 */
-+	offset = phys_addr & ~PAGE_MASK;
-+	phys_addr &= PAGE_MASK;
-+	size = PAGE_ALIGN(last_addr) - phys_addr;
++static void __init kmap_init(void)
++{
++	unsigned long kmap_vstart;
 +
-+	/*
-+	 * Mappings have to fit in the FIX_BTMAP area.
-+	 */
-+	nrpages = size >> PAGE_SHIFT;
-+	if (nrpages > NR_FIX_BTMAPS)
-+		return NULL;
++	/* cache the first kmap pte */
++	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
++	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
 +
-+	/*
-+	 * Ok, go for it..
-+	 */
-+	idx = FIX_BTMAP_BEGIN;
-+	while (nrpages > 0) {
-+		set_fixmap(idx, phys_addr);
-+		phys_addr += PAGE_SIZE;
-+		--idx;
-+		--nrpages;
-+	}
-+	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
++	kmap_prot = PAGE_KERNEL;
 +}
 +
-+void __init bt_iounmap(void *addr, unsigned long size)
++static void __init permanent_kmaps_init(pgd_t *pgd_base)
 +{
-+	unsigned long virt_addr;
-+	unsigned long offset;
-+	unsigned int nrpages;
-+	enum fixed_addresses idx;
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++	unsigned long vaddr;
 +
-+	virt_addr = (unsigned long)addr;
-+	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
-+		return;
-+	if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-+		return;
-+	offset = virt_addr & ~PAGE_MASK;
-+	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
++	vaddr = PKMAP_BASE;
++	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
 +
-+	idx = FIX_BTMAP_BEGIN;
-+	while (nrpages > 0) {
-+		clear_fixmap(idx);
-+		--idx;
-+		--nrpages;
-+	}
++	pgd = swapper_pg_dir + pgd_index(vaddr);
++	pud = pud_offset(pgd, vaddr);
++	pmd = pmd_offset(pud, vaddr);
++	pte = pte_offset_kernel(pmd, vaddr);
++	pkmap_page_table = pte;	
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/mm/Makefile linux-2.6.18-xen/arch/i386/mm/Makefile
---- linux-2.6.18.3/arch/i386/mm/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/mm/Makefile	2006-11-19 14:26:22.000000000 +0100
-@@ -8,3 +8,11 @@
- obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
- obj-$(CONFIG_HIGHMEM) += highmem.o
- obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
 +
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
++static void __meminit free_new_highpage(struct page *page, int pfn)
++{
++	init_page_count(page);
++	if (pfn < xen_start_info->nr_pages)
++		__free_page(page);
++	totalhigh_pages++;
++}
 +
-+obj-y		+= hypervisor.o
++void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
++{
++	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
++		ClearPageReserved(page);
++		free_new_highpage(page, pfn);
++	} else
++		SetPageReserved(page);
++}
++
++static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
++{
++	free_new_highpage(page, pfn);
++	totalram_pages++;
++#ifdef CONFIG_FLATMEM
++	max_mapnr = max(pfn, max_mapnr);
++#endif
++	num_physpages++;
++	return 0;
++}
 +
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/mm/pageattr.c linux-2.6.18-xen/arch/i386/mm/pageattr.c
---- linux-2.6.18.3/arch/i386/mm/pageattr.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/mm/pageattr.c	2006-11-19 14:26:22.000000000 +0100
-@@ -84,7 +84,7 @@
- 	unsigned long flags;
- 
- 	set_pte_atomic(kpte, pte); 	/* change init_mm */
--	if (PTRS_PER_PMD > 1)
-+	if (HAVE_SHARED_KERNEL_PMD)
- 		return;
- 
- 	spin_lock_irqsave(&pgd_lock, flags);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/mm/pgtable-xen.c linux-2.6.18-xen/arch/i386/mm/pgtable-xen.c
---- linux-2.6.18.3/arch/i386/mm/pgtable-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/mm/pgtable-xen.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,699 @@
 +/*
-+ *  linux/arch/i386/mm/pgtable.c
++ * Not currently handling the NUMA case.
++ * Assuming a single node; all memory that
++ * has been added dynamically and would be
++ * onlined here is in HIGHMEM.
 + */
++void __meminit online_page(struct page *page)
++{
++	ClearPageReserved(page);
++	add_one_highpage_hotplug(page, page_to_pfn(page));
++}
 +
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/highmem.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/spinlock.h>
-+#include <linux/module.h>
-+
-+#include <asm/system.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/tlb.h>
-+#include <asm/tlbflush.h>
-+#include <asm/io.h>
-+#include <asm/mmu_context.h>
 +
-+#include <xen/features.h>
-+#include <xen/foreign_page.h>
-+#include <asm/hypervisor.h>
++#ifdef CONFIG_NUMA
++extern void set_highmem_pages_init(int);
++#else
++static void __init set_highmem_pages_init(int bad_ppro)
++{
++	int pfn;
++	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
++		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
++	totalram_pages += totalhigh_pages;
++}
++#endif /* CONFIG_NUMA */
 +
-+static void pgd_test_and_unpin(pgd_t *pgd);
++#else
++#define kmap_init() do { } while (0)
++#define permanent_kmaps_init(pgd_base) do { } while (0)
++#define set_highmem_pages_init(bad_ppro) do { } while (0)
++#endif /* CONFIG_HIGHMEM */
 +
-+void show_mem(void)
-+{
-+	int total = 0, reserved = 0;
-+	int shared = 0, cached = 0;
-+	int highmem = 0;
-+	struct page *page;
-+	pg_data_t *pgdat;
-+	unsigned long i;
-+	unsigned long flags;
++unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
++EXPORT_SYMBOL(__PAGE_KERNEL);
++unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
 +
-+	printk(KERN_INFO "Mem-info:\n");
-+	show_free_areas();
-+	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-+	for_each_online_pgdat(pgdat) {
-+		pgdat_resize_lock(pgdat, &flags);
-+		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-+			page = pgdat_page_nr(pgdat, i);
-+			total++;
-+			if (PageHighMem(page))
-+				highmem++;
-+			if (PageReserved(page))
-+				reserved++;
-+			else if (PageSwapCache(page))
-+				cached++;
-+			else if (page_count(page))
-+				shared += page_count(page) - 1;
-+		}
-+		pgdat_resize_unlock(pgdat, &flags);
-+	}
-+	printk(KERN_INFO "%d pages of RAM\n", total);
-+	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
-+	printk(KERN_INFO "%d reserved pages\n", reserved);
-+	printk(KERN_INFO "%d pages shared\n", shared);
-+	printk(KERN_INFO "%d pages swap cached\n", cached);
++#ifdef CONFIG_NUMA
++extern void __init remap_numa_kva(void);
++#else
++#define remap_numa_kva() do {} while (0)
++#endif
 +
-+	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
-+	printk(KERN_INFO "%lu pages writeback\n",
-+					global_page_state(NR_WRITEBACK));
-+	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-+	printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
-+	printk(KERN_INFO "%lu pages pagetables\n",
-+					global_page_state(NR_PAGETABLE));
-+}
++pgd_t *swapper_pg_dir;
 +
-+/*
-+ * Associate a virtual page frame with a given physical page frame 
-+ * and protection flags for that frame.
-+ */ 
-+static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
++static void __init pagetable_init (void)
 +{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
++	unsigned long vaddr;
++	pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
 +
-+	pgd = swapper_pg_dir + pgd_index(vaddr);
-+	if (pgd_none(*pgd)) {
-+		BUG();
-+		return;
-+	}
-+	pud = pud_offset(pgd, vaddr);
-+	if (pud_none(*pud)) {
-+		BUG();
-+		return;
++	swapper_pg_dir = pgd_base;
++	init_mm.pgd    = pgd_base;
++
++	/* Enable PSE if available */
++	if (cpu_has_pse) {
++		set_in_cr4(X86_CR4_PSE);
 +	}
-+	pmd = pmd_offset(pud, vaddr);
-+	if (pmd_none(*pmd)) {
-+		BUG();
-+		return;
++
++	/* Enable PGE if available */
++	if (cpu_has_pge) {
++		set_in_cr4(X86_CR4_PGE);
++		__PAGE_KERNEL |= _PAGE_GLOBAL;
++		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
 +	}
-+	pte = pte_offset_kernel(pmd, vaddr);
-+	/* <pfn,flags> stored as-is, to permit clearing entries */
-+	set_pte(pte, pfn_pte(pfn, flags));
++
++	kernel_physical_mapping_init(pgd_base);
++	remap_numa_kva();
 +
 +	/*
-+	 * It's enough to flush this one mapping.
-+	 * (PGE mappings get flushed as well)
++	 * Fixed mappings, only the page table structure has to be
++	 * created - mappings will be set by set_fixmap():
 +	 */
-+	__flush_tlb_one(vaddr);
++	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
++	page_table_range_init(vaddr, 0, pgd_base);
++
++	permanent_kmaps_init(pgd_base);
 +}
 +
++#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
 +/*
-+ * Associate a virtual page frame with a given physical page frame 
-+ * and protection flags for that frame.
-+ */ 
-+static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
-+			   pgprot_t flags)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
++ * Swap suspend & friends need this for resume because things like the intel-agp
++ * driver might have split up a kernel 4MB mapping.
++ */
++char __nosavedata swsusp_pg_dir[PAGE_SIZE]
++	__attribute__ ((aligned (PAGE_SIZE)));
 +
-+	pgd = swapper_pg_dir + pgd_index(vaddr);
-+	if (pgd_none(*pgd)) {
-+		BUG();
-+		return;
-+	}
-+	pud = pud_offset(pgd, vaddr);
-+	if (pud_none(*pud)) {
-+		BUG();
-+		return;
-+	}
-+	pmd = pmd_offset(pud, vaddr);
-+	if (pmd_none(*pmd)) {
-+		BUG();
-+		return;
-+	}
-+	pte = pte_offset_kernel(pmd, vaddr);
-+	/* <pfn,flags> stored as-is, to permit clearing entries */
-+	set_pte(pte, pfn_pte_ma(pfn, flags));
++static inline void save_pg_dir(void)
++{
++	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
++}
++#else
++static inline void save_pg_dir(void)
++{
++}
++#endif
++
++void zap_low_mappings (void)
++{
++	int i;
++
++	save_pg_dir();
 +
 +	/*
-+	 * It's enough to flush this one mapping.
-+	 * (PGE mappings get flushed as well)
++	 * Zap initial low-memory mappings.
++	 *
++	 * Note that "pgd_clear()" doesn't do it for
++	 * us, because pgd_clear() is a no-op on i386.
 +	 */
-+	__flush_tlb_one(vaddr);
++	for (i = 0; i < USER_PTRS_PER_PGD; i++)
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
++#else
++		set_pgd(swapper_pg_dir+i, __pgd(0));
++#endif
++	flush_tlb_all();
 +}
 +
++static int disable_nx __initdata = 0;
++u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
++EXPORT_SYMBOL(__supported_pte_mask);
++
 +/*
-+ * Associate a large virtual page frame with a given physical page frame 
-+ * and protection flags for that frame. pfn is for the base of the page,
-+ * vaddr is what the page gets mapped to - both must be properly aligned. 
-+ * The pmd must already be instantiated. Assumes PAE mode.
-+ */ 
-+void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
++ * noexec = on|off
++ *
++ * Control non executable mappings.
++ *
++ * on      Enable
++ * off     Disable
++ */
++static int __init noexec_setup(char *str)
 +{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
++	if (!str || !strcmp(str, "on")) {
++		if (cpu_has_nx) {
++			__supported_pte_mask |= _PAGE_NX;
++			disable_nx = 0;
++		}
++	} else if (!strcmp(str,"off")) {
++		disable_nx = 1;
++		__supported_pte_mask &= ~_PAGE_NX;
++	} else
++		return -EINVAL;
 +
-+	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
-+		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
-+		return; /* BUG(); */
-+	}
-+	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
-+		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
-+		return; /* BUG(); */
-+	}
-+	pgd = swapper_pg_dir + pgd_index(vaddr);
-+	if (pgd_none(*pgd)) {
-+		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
-+		return; /* BUG(); */
-+	}
-+	pud = pud_offset(pgd, vaddr);
-+	pmd = pmd_offset(pud, vaddr);
-+	set_pmd(pmd, pfn_pmd(pfn, flags));
-+	/*
-+	 * It's enough to flush this one mapping.
-+	 * (PGE mappings get flushed as well)
-+	 */
-+	__flush_tlb_one(vaddr);
++	return 0;
 +}
++early_param("noexec", noexec_setup);
 +
-+static int nr_fixmaps = 0;
-+unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
-+EXPORT_SYMBOL(__FIXADDR_TOP);
++int nx_enabled = 0;
++#ifdef CONFIG_X86_PAE
 +
-+void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
++static void __init set_nx(void)
 +{
-+	unsigned long address = __fix_to_virt(idx);
++	unsigned int v[4], l, h;
 +
-+	if (idx >= __end_of_fixed_addresses) {
-+		BUG();
-+		return;
-+	}
-+	switch (idx) {
-+	case FIX_WP_TEST:
-+#ifdef CONFIG_X86_F00F_BUG
-+	case FIX_F00F_IDT:
-+#endif
-+		set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
-+		break;
-+	default:
-+		set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
-+		break;
++	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
++		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
++		if ((v[3] & (1 << 20)) && !disable_nx) {
++			rdmsr(MSR_EFER, l, h);
++			l |= EFER_NX;
++			wrmsr(MSR_EFER, l, h);
++			nx_enabled = 1;
++			__supported_pte_mask |= _PAGE_NX;
++		}
 +	}
-+	nr_fixmaps++;
 +}
 +
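
set_nx() above probes CPUID leaf 0x80000001 for the NX capability (EDX bit
20) before setting EFER.NX with rdmsr/wrmsr. The MSR half needs ring 0, but
the CPUID probe can be reproduced from user space; a sketch using GCC's
<cpuid.h> helper (x86 only):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Extended leaves present? (the cpuid_eax(0x80000000) check) */
        if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
            eax < 0x80000001) {
            printf("no extended CPUID leaves\n");
            return 0;
        }

        __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
        printf("NX bit %s\n", (edx & (1u << 20)) ? "present" : "absent");
        return 0;
    }
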
-+void set_fixaddr_top(unsigned long top)
++/*
++ * Enables/disables executability of a given kernel page and
++ * returns the previous setting.
++ */
++int __init set_kernel_exec(unsigned long vaddr, int enable)
 +{
-+	BUG_ON(nr_fixmaps > 0);
-+	__FIXADDR_TOP = top - PAGE_SIZE;
-+}
++	pte_t *pte;
++	int ret = 1;
 +
-+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-+{
-+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-+	if (pte)
-+		make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
-+	return pte;
++	if (!nx_enabled)
++		goto out;
++
++	pte = lookup_address(vaddr);
++	BUG_ON(!pte);
++
++	if (!pte_exec_kernel(*pte))
++		ret = 0;
++
++	if (enable)
++		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
++	else
++		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
++	pte_update_defer(&init_mm, vaddr, pte);
++	__flush_tlb_all();
++out:
++	return ret;
 +}
 +
-+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++#endif
++
++/*
++ * paging_init() sets up the page tables - note that the first 8MB are
++ * already mapped by head.S.
++ *
++ * This routine also unmaps the page at virtual kernel address 0, so
++ * that we can trap those pesky NULL-reference errors in the kernel.
++ */
++void __init paging_init(void)
 +{
-+	struct page *pte;
++	int i;
 +
-+#ifdef CONFIG_HIGHPTE
-+	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
-+#else
-+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-+	if (pte) {
-+		SetPageForeign(pte, pte_free);
-+		init_page_count(pte);
-+	}
++#ifdef CONFIG_X86_PAE
++	set_nx();
++	if (nx_enabled)
++		printk("NX (Execute Disable) protection: active\n");
 +#endif
-+	return pte;
-+}
 +
-+void pte_free(struct page *pte)
-+{
-+	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
++	pagetable_init();
 +
-+	if (!pte_write(*virt_to_ptep(va)))
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++	/*
++	 * We will bail out later - printk doesn't work right now so
++	 * the user would just see a hanging kernel.
++	 * When running as a Xen domain we are already in PAE mode at
++	 * this point.
++	 */
++	if (cpu_has_pae)
++		set_in_cr4(X86_CR4_PAE);
++#endif
++	__flush_tlb_all();
 +
-+	ClearPageForeign(pte);
-+	init_page_count(pte);
++	kmap_init();
 +
-+	__free_page(pte);
-+}
++	/* Switch to the real shared_info page, and clear the
++	 * dummy page. */
++	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++	memset(empty_zero_page, 0, sizeof(empty_zero_page));
 +
-+void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
-+{
-+	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++	/* Setup mapping of lower 1st MB */
++	for (i = 0; i < NR_FIX_ISAMAPS; i++)
++		if (is_initial_xendomain())
++			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++		else
++			__set_fixmap(FIX_ISAMAP_BEGIN - i,
++				     virt_to_machine(empty_zero_page),
++				     PAGE_KERNEL_RO);
 +}
 +
 +/*
-+ * List of all pgd's needed for non-PAE so it can invalidate entries
-+ * in both cached and uncached pgd's; not needed for PAE since the
-+ * kernel pmd is shared. If PAE were not to share the pmd a similar
-+ * tactic would be needed. This is essentially codepath-based locking
-+ * against pageattr.c; it is the unique case in which a valid change
-+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
-+ * vmalloc faults work because attached pagetables are never freed.
-+ * The locking scheme was chosen on the basis of manfred's
-+ * recommendations and having no core impact whatsoever.
-+ * -- wli
++ * Test if the WP bit works in supervisor mode. It isn't supported on 386's,
++ * nor on some strange 486's (NexGen etc.). All 586+'s are OK. This
++ * used to involve black magic jumps to work around some nasty CPU bugs,
++ * but fortunately the switch to using exceptions got rid of all that.
 + */
-+DEFINE_SPINLOCK(pgd_lock);
-+struct page *pgd_list;
-+
-+static inline void pgd_list_add(pgd_t *pgd)
-+{
-+	struct page *page = virt_to_page(pgd);
-+	page->index = (unsigned long)pgd_list;
-+	if (pgd_list)
-+		set_page_private(pgd_list, (unsigned long)&page->index);
-+	pgd_list = page;
-+	set_page_private(page, (unsigned long)&pgd_list);
-+}
 +
-+static inline void pgd_list_del(pgd_t *pgd)
++static void __init test_wp_bit(void)
 +{
-+	struct page *next, **pprev, *page = virt_to_page(pgd);
-+	next = (struct page *)page->index;
-+	pprev = (struct page **)page_private(page);
-+	*pprev = next;
-+	if (next)
-+		set_page_private(next, (unsigned long)pprev);
-+}
++	printk("Checking if this processor honours the WP bit even in supervisor mode... ");
 +
-+void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
-+{
-+	unsigned long flags;
++	/* Any page-aligned address will do, the test is non-destructive */
++	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
++	boot_cpu_data.wp_works_ok = do_test_wp_bit();
++	clear_fixmap(FIX_WP_TEST);
 +
-+	if (PTRS_PER_PMD > 1) {
-+		if (HAVE_SHARED_KERNEL_PMD)
-+			clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+					swapper_pg_dir + USER_PTRS_PER_PGD,
-+					KERNEL_PGD_PTRS);
++	if (!boot_cpu_data.wp_works_ok) {
++		printk("No.\n");
++#ifdef CONFIG_X86_WP_WORKS_OK
++		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
++#endif
 +	} else {
-+		spin_lock_irqsave(&pgd_lock, flags);
-+		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+				swapper_pg_dir + USER_PTRS_PER_PGD,
-+				KERNEL_PGD_PTRS);
-+		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-+		pgd_list_add(pgd);
-+		spin_unlock_irqrestore(&pgd_lock, flags);
++		printk("Ok.\n");
 +	}
 +}
 +
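
test_wp_bit() above maps a page read-only at FIX_WP_TEST, writes to it from
ring 0, and relies on the exception-table fixup inside do_test_wp_bit() to
turn a fault into a flag rather than an oops. A loose user-space analogue of
"write to a read-only page and survive the fault", via mprotect plus a
SIGSEGV handler; this only illustrates the idea, not the kernel mechanism
(error handling omitted for brevity):

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/mman.h>

    static sigjmp_buf recover;

    static void on_segv(int sig)
    {
        (void)sig;
        siglongjmp(recover, 1);   /* plays the exception-fixup role */
    }

    int main(void)
    {
        char *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        signal(SIGSEGV, on_segv);
        mprotect(page, 4096, PROT_READ);   /* like PAGE_READONLY */

        if (sigsetjmp(recover, 1) == 0) {
            page[0] = 1;                   /* the probing write */
            printf("write went through: protection not enforced\n");
        } else {
            printf("write faulted: protection is enforced\n");
        }
        return 0;
    }
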
-+/* never called when PTRS_PER_PMD > 1 */
-+void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
++static struct kcore_list kcore_mem, kcore_vmalloc; 
++
++void __init mem_init(void)
 +{
-+	unsigned long flags; /* can be called from interrupt context */
++	extern int ppro_with_ram_bug(void);
++	int codesize, reservedpages, datasize, initsize;
++	int tmp;
++	int bad_ppro;
++	unsigned long pfn;
 +
-+	spin_lock_irqsave(&pgd_lock, flags);
-+	pgd_list_del(pgd);
-+	spin_unlock_irqrestore(&pgd_lock, flags);
++	contiguous_bitmap = alloc_bootmem_low_pages(
++		(max_low_pfn + 2*BITS_PER_LONG) >> 3);
++	BUG_ON(!contiguous_bitmap);
++	memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
 +
-+	pgd_test_and_unpin(pgd);
-+}
++#if defined(CONFIG_SWIOTLB)
++	swiotlb_init();	
++#endif
 +
-+pgd_t *pgd_alloc(struct mm_struct *mm)
-+{
-+	int i;
-+	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
-+	pmd_t **pmd;
-+	unsigned long flags;
++#ifdef CONFIG_FLATMEM
++	BUG_ON(!mem_map);
++#endif
++	
++	bad_ppro = ppro_with_ram_bug();
 +
-+	pgd_test_and_unpin(pgd);
++#ifdef CONFIG_HIGHMEM
++	/* check that fixmap and pkmap do not overlap */
++	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
++		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
++		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
++				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
++		BUG();
++	}
++#endif
++ 
++	printk("vmalloc area: %lx-%lx, maxmem %lx\n",
++	       VMALLOC_START,VMALLOC_END,MAXMEM);
++	BUG_ON(VMALLOC_START > VMALLOC_END);
++	
++	/* this will put all low memory onto the freelists */
++	totalram_pages += free_all_bootmem();
++	/* XEN: init and count low-mem pages outside initial allocation. */
++	for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
++		ClearPageReserved(&mem_map[pfn]);
++		init_page_count(&mem_map[pfn]);
++		totalram_pages++;
++	}
 +
-+	if (PTRS_PER_PMD == 1 || !pgd)
-+		return pgd;
++	reservedpages = 0;
++	for (tmp = 0; tmp < max_low_pfn; tmp++)
++		/*
++		 * Only count reserved RAM pages
++		 */
++		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
++			reservedpages++;
 +
-+	if (HAVE_SHARED_KERNEL_PMD) {
-+		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-+			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-+			if (!pmd)
-+				goto out_oom;
-+			set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
-+		}
-+		return pgd;
-+	}
++	set_highmem_pages_init(bad_ppro);
 +
-+	/*
-+	 * We can race save/restore (if we sleep during a GFP_KERNEL memory
-+	 * allocation). We therefore store virtual addresses of pmds as they
-+	 * do not change across save/restore, and poke the machine addresses
-+	 * into the pgdir under the pgd_lock.
-+	 */
-+	pmd = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
-+	if (!pmd) {
-+		kmem_cache_free(pgd_cache, pgd);
-+		return NULL;
-+	}
++	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
++	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
++	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
 +
-+	/* Allocate pmds, remember virtual addresses. */
-+	for (i = 0; i < PTRS_PER_PGD; ++i) {
-+		pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-+		if (!pmd[i])
-+			goto out_oom;
-+	}
++	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
++	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
++		   VMALLOC_END-VMALLOC_START);
 +
-+	spin_lock_irqsave(&pgd_lock, flags);
++	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
++		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++		num_physpages << (PAGE_SHIFT-10),
++		codesize >> 10,
++		reservedpages << (PAGE_SHIFT-10),
++		datasize >> 10,
++		initsize >> 10,
++		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
++	       );
 +
-+	/* Protect against save/restore: move below 4GB under pgd_lock. */
-+	if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
-+		int rc = xen_create_contiguous_region(
-+			(unsigned long)pgd, 0, 32);
-+		if (rc) {
-+			spin_unlock_irqrestore(&pgd_lock, flags);
-+			goto out_oom;
-+		}
-+	}
++#if 1 /* double-sanity-check paranoia */
++	printk("virtual kernel memory layout:\n"
++	       "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
++#ifdef CONFIG_HIGHMEM
++	       "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
++#endif
++	       "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
++	       "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
++	       "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
++	       "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
++	       "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
++	       FIXADDR_START, FIXADDR_TOP,
++	       (FIXADDR_TOP - FIXADDR_START) >> 10,
 +
-+	/* Copy kernel pmd contents and write-protect the new pmds. */
-+	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+		unsigned long v = (unsigned long)i << PGDIR_SHIFT;
-+		pgd_t *kpgd = pgd_offset_k(v);
-+		pud_t *kpud = pud_offset(kpgd, v);
-+		pmd_t *kpmd = pmd_offset(kpud, v);
-+		memcpy(pmd[i], kpmd, PAGE_SIZE);
-+		make_lowmem_page_readonly(
-+			pmd[i], XENFEAT_writable_page_tables);
-+	}
++#ifdef CONFIG_HIGHMEM
++	       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
++	       (LAST_PKMAP*PAGE_SIZE) >> 10,
++#endif
 +
-+	/* It is safe to poke machine addresses of pmds under the pmd_lock. */
-+	for (i = 0; i < PTRS_PER_PGD; i++)
-+		set_pgd(&pgd[i], __pgd(1 + __pa(pmd[i])));
++	       VMALLOC_START, VMALLOC_END,
++	       (VMALLOC_END - VMALLOC_START) >> 20,
 +
-+	/* Ensure this pgd gets picked up and pinned on save/restore. */
-+	pgd_list_add(pgd);
++	       (unsigned long)__va(0), (unsigned long)high_memory,
++	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
 +
-+	spin_unlock_irqrestore(&pgd_lock, flags);
++	       (unsigned long)&__init_begin, (unsigned long)&__init_end,
++	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
 +
-+	kfree(pmd);
++	       (unsigned long)&_etext, (unsigned long)&_edata,
++	       ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
 +
-+	return pgd;
++	       (unsigned long)&_text, (unsigned long)&_etext,
++	       ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
 +
-+out_oom:
-+	if (HAVE_SHARED_KERNEL_PMD) {
-+		for (i--; i >= 0; i--)
-+			kmem_cache_free(pmd_cache,
-+					(void *)__va(pgd_val(pgd[i])-1));
-+	} else {
-+		for (i--; i >= 0; i--)
-+			kmem_cache_free(pmd_cache, pmd[i]);
-+		kfree(pmd);
++#ifdef CONFIG_HIGHMEM
++	BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
++	BUG_ON(VMALLOC_END                     > PKMAP_BASE);
++#endif
++	BUG_ON(VMALLOC_START                   > VMALLOC_END);
++	BUG_ON((unsigned long)high_memory      > VMALLOC_START);
++#endif /* double-sanity-check paranoia */
++
++#ifdef CONFIG_X86_PAE
++	if (!cpu_has_pae)
++		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
++#endif
++	if (boot_cpu_data.wp_works_ok < 0)
++		test_wp_bit();
++
++	/*
++	 * Subtle. SMP is doing its boot stuff late (because it has to
++	 * fork idle threads) - but it also needs low mappings for the
++	 * protected-mode entry to work. We zap these entries only after
++	 * the WP-bit has been tested.
++	 */
++#ifndef CONFIG_SMP
++	zap_low_mappings();
++#endif
++
++	set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
++}
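
The "Memory:" banner above converts page counts to kilobytes with << (PAGE_SHIFT-10). A minimal sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12, so the shift multiplies by 4):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages, as on i386 */

int main(void)
{
	unsigned long nr_pages = 25600;	/* hypothetical free-page count */

	printf("%lu pages = %luk\n", nr_pages,
	       nr_pages << (PAGE_SHIFT - 10));	/* prints 102400k */
	return 0;
}
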
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++int arch_add_memory(int nid, u64 start, u64 size)
++{
++	struct pglist_data *pgdata = NODE_DATA(nid);
++	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
++	unsigned long start_pfn = start >> PAGE_SHIFT;
++	unsigned long nr_pages = size >> PAGE_SHIFT;
++
++	return __add_pages(zone, start_pfn, nr_pages);
++}
++
++int remove_memory(u64 start, u64 size)
++{
++	return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(remove_memory);
++#endif
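
arch_add_memory() reduces the hotplugged byte range to page-frame units before calling __add_pages(). A worked sketch of the shift arithmetic, with hypothetical values:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	uint64_t start = 0x10000000ULL;	/* hypothetical 256 MiB hotplug base */
	uint64_t size  = 0x04000000ULL;	/* hypothetical 64 MiB region */

	printf("start_pfn=%llu nr_pages=%llu\n",
	       (unsigned long long)(start >> PAGE_SHIFT),
	       (unsigned long long)(size >> PAGE_SHIFT));
	return 0;
}
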
++
++struct kmem_cache *pgd_cache;
++struct kmem_cache *pmd_cache;
++
++void __init pgtable_cache_init(void)
++{
++	if (PTRS_PER_PMD > 1) {
++		pmd_cache = kmem_cache_create("pmd",
++					PTRS_PER_PMD*sizeof(pmd_t),
++					PTRS_PER_PMD*sizeof(pmd_t),
++					0,
++					pmd_ctor,
++					NULL);
++		if (!pmd_cache)
++			panic("pgtable_cache_init(): cannot create pmd cache");
 +	}
-+	kmem_cache_free(pgd_cache, pgd);
-+	return NULL;
++	pgd_cache = kmem_cache_create("pgd",
++#ifndef CONFIG_XEN
++				PTRS_PER_PGD*sizeof(pgd_t),
++				PTRS_PER_PGD*sizeof(pgd_t),
++#else
++				PAGE_SIZE,
++				PAGE_SIZE,
++#endif
++				0,
++				pgd_ctor,
++				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
++	if (!pgd_cache)
++		panic("pgtable_cache_init(): Cannot create pgd cache");
 +}
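
Note how the Xen branch sizes and aligns pgd objects at a full PAGE_SIZE, since the hypervisor validates page directories at page granularity, and both caches install constructors so objects come back pre-initialised. A loose userspace sketch of that ctor-backed aligned allocation (names are illustrative; a real slab runs its ctor when a slab page is populated, not on every allocation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define OBJ_SIZE 4096	/* assumption: PAGE_SIZE-sized, page-aligned pgd */

static void pgd_ctor_sketch(void *obj)
{
	memset(obj, 0, OBJ_SIZE);	/* stand-in for cloning kernel entries */
}

static void *cache_alloc_sketch(void)
{
	void *obj = aligned_alloc(OBJ_SIZE, OBJ_SIZE);	/* C11 */

	if (obj)
		pgd_ctor_sketch(obj);	/* object handed out pre-initialised */
	return obj;
}

int main(void)
{
	void *pgd = cache_alloc_sketch();

	printf("page-aligned object at %p\n", pgd);
	free(pgd);
	return 0;
}
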
 +
-+void pgd_free(pgd_t *pgd)
++/*
++ * This function cannot be __init, since exceptions don't work in that
++ * section.  Put this after the callers, so that it cannot be inlined.
++ */
++static int noinline do_test_wp_bit(void)
 +{
-+	int i;
++	char tmp_reg;
++	int flag;
 +
-+	/*
-+	 * After this the pgd should not be pinned for the duration of this
-+	 * function's execution. We should never sleep and thus never race:
-+	 *  1. User pmds will not become write-protected under our feet due
-+	 *     to a concurrent mm_pin_all().
-+	 *  2. The machine addresses in PGD entries will not become invalid
-+	 *     due to a concurrent save/restore.
-+	 */
-+	pgd_test_and_unpin(pgd);
++	__asm__ __volatile__(
++		"	movb %0,%1	\n"
++		"1:	movb %1,%0	\n"
++		"	xorl %2,%2	\n"
++		"2:			\n"
++		".section __ex_table,\"a\"\n"
++		"	.align 4	\n"
++		"	.long 1b,2b	\n"
++		".previous		\n"
++		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
++		 "=q" (tmp_reg),
++		 "=r" (flag)
++		:"2" (1)
++		:"memory");
++	
++	return flag;
++}
 +
-+	/* in the PAE case user pgd entries are overwritten before usage */
-+	if (PTRS_PER_PMD > 1) {
-+		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+			kmem_cache_free(pmd_cache, pmd);
-+		}
++#ifdef CONFIG_DEBUG_RODATA
 +
-+		if (!HAVE_SHARED_KERNEL_PMD) {
-+			unsigned long flags;
-+			spin_lock_irqsave(&pgd_lock, flags);
-+			pgd_list_del(pgd);
-+			spin_unlock_irqrestore(&pgd_lock, flags);
++void mark_rodata_ro(void)
++{
++	unsigned long addr = (unsigned long)__start_rodata;
 +
-+			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+				make_lowmem_page_writable(
-+					pmd, XENFEAT_writable_page_tables);
-+				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-+				kmem_cache_free(pmd_cache, pmd);
-+			}
++	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
++		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
 +
-+			if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
-+				xen_destroy_contiguous_region(
-+					(unsigned long)pgd, 0);
-+		}
-+	}
++	printk("Write protecting the kernel read-only data: %uk\n",
++			(__end_rodata - __start_rodata) >> 10);
 +
-+	/* in the non-PAE case, free_pgtables() clears user pgd entries */
-+	kmem_cache_free(pgd_cache, pgd);
++	/*
++	 * change_page_attr() requires a global_flush_tlb() call after it.
++	 * We do this after the printk so that if something went wrong in the
++	 * change, the printk gets out at least to give a better debug hint
++	 * of who is the culprit.
++	 */
++	global_flush_tlb();
 +}
++#endif
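
A userspace analogue of mark_rodata_ro(), with mprotect(2) standing in for change_page_attr() followed by global_flush_tlb() (sizes hypothetical):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	size_t len = 4 * (size_t)pagesz;	/* hypothetical rodata size */
	char *region = mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (region == MAP_FAILED)
		return 1;
	if (mprotect(region, len, PROT_READ) == 0)	/* ~change_page_attr */
		printf("Write protecting read-only data: %zuk\n", len >> 10);
	munmap(region, len);
	return 0;
}
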
 +
-+void make_lowmem_page_readonly(void *va, unsigned int feature)
++void free_init_pages(char *what, unsigned long begin, unsigned long end)
 +{
-+	pte_t *pte;
-+	int rc;
++	unsigned long addr;
 +
-+	if (xen_feature(feature))
-+		return;
++	for (addr = begin; addr < end; addr += PAGE_SIZE) {
++		ClearPageReserved(virt_to_page(addr));
++		init_page_count(virt_to_page(addr));
++		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
++		free_page(addr);
++		totalram_pages++;
++	}
++	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
++}
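
free_init_pages() poisons each page before handing it back, so a stale reference to init memory fails loudly instead of silently reading leftover bytes. A small userspace sketch of the poison-then-free idea (0xcc is an assumption mirroring the x86 POISON_FREE_INITMEM value; check include/linux/poison.h):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POISON_BYTE 0xcc	/* assumption: mirrors POISON_FREE_INITMEM on x86 */

int main(void)
{
	size_t len = 4096;	/* hypothetical init-section page */
	unsigned char *mem = malloc(len);

	if (!mem)
		return 1;
	memset(mem, POISON_BYTE, len);	/* poison before releasing */
	printf("Freeing unused memory: %zuk freed\n", len >> 10);
	free(mem);
	return 0;
}
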
 +
-+	pte = virt_to_ptep(va);
-+	rc = HYPERVISOR_update_va_mapping(
-+		(unsigned long)va, pte_wrprotect(*pte), 0);
-+	BUG_ON(rc);
++void free_initmem(void)
++{
++	free_init_pages("unused kernel memory",
++			(unsigned long)(&__init_begin),
++			(unsigned long)(&__init_end));
 +}
 +
-+void make_lowmem_page_writable(void *va, unsigned int feature)
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
 +{
-+	pte_t *pte;
-+	int rc;
++	free_init_pages("initrd memory", start, end);
++}
++#endif
 +
-+	if (xen_feature(feature))
-+		return;
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/mm/ioremap-xen.c b/arch/i386/mm/ioremap-xen.c
+--- a/arch/i386/mm/ioremap-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/mm/ioremap-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,443 @@
++/*
++ * arch/i386/mm/ioremap.c
++ *
++ * Re-map IO memory to kernel address space so that we can access it.
++ * This is needed for high PCI addresses that aren't mapped in the
++ * 640k-1MB IO memory area on PC's
++ *
++ * (C) Copyright 1995 1996 Linus Torvalds
++ */
 +
-+	pte = virt_to_ptep(va);
-+	rc = HYPERVISOR_update_va_mapping(
-+		(unsigned long)va, pte_mkwrite(*pte), 0);
-+	BUG_ON(rc);
-+}
++#include <linux/vmalloc.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/io.h>
++#include <asm/fixmap.h>
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
 +
-+void make_page_readonly(void *va, unsigned int feature)
++#define ISA_START_ADDRESS	0x0
++#define ISA_END_ADDRESS		0x100000
++
++static int direct_remap_area_pte_fn(pte_t *pte, 
++				    struct page *pmd_page,
++				    unsigned long address, 
++				    void *data)
 +{
-+	pte_t *pte;
-+	int rc;
++	mmu_update_t **v = (mmu_update_t **)data;
 +
-+	if (xen_feature(feature))
-+		return;
++	BUG_ON(!pte_none(*pte));
 +
-+	pte = virt_to_ptep(va);
-+	rc = HYPERVISOR_update_va_mapping(
-+		(unsigned long)va, pte_wrprotect(*pte), 0);
-+	if (rc) /* fallback? */
-+		xen_l1_entry_update(pte, pte_wrprotect(*pte));
-+	if ((unsigned long)va >= (unsigned long)high_memory) {
-+		unsigned long pfn = pte_pfn(*pte);
-+#ifdef CONFIG_HIGHMEM
-+		if (pfn >= highstart_pfn)
-+			kmap_flush_unused(); /* flush stale writable kmaps */
-+		else
-+#endif
-+			make_lowmem_page_readonly(
-+				phys_to_virt(pfn << PAGE_SHIFT), feature); 
-+	}
++	(*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pmd_page)) <<
++		     PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
++	(*v)++;
++
++	return 0;
 +}
 +
-+void make_page_writable(void *va, unsigned int feature)
++static int __direct_remap_pfn_range(struct mm_struct *mm,
++				    unsigned long address, 
++				    unsigned long mfn,
++				    unsigned long size, 
++				    pgprot_t prot,
++				    domid_t  domid)
 +{
-+	pte_t *pte;
 +	int rc;
++	unsigned long i, start_address;
++	mmu_update_t *u, *v, *w;
 +
-+	if (xen_feature(feature))
-+		return;
++	u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
++	if (u == NULL)
++		return -ENOMEM;
 +
-+	pte = virt_to_ptep(va);
-+	rc = HYPERVISOR_update_va_mapping(
-+		(unsigned long)va, pte_mkwrite(*pte), 0);
-+	if (rc) /* fallback? */
-+		xen_l1_entry_update(pte, pte_mkwrite(*pte));
-+	if ((unsigned long)va >= (unsigned long)high_memory) {
-+		unsigned long pfn = pte_pfn(*pte); 
-+#ifdef CONFIG_HIGHMEM
-+		if (pfn < highstart_pfn)
-+#endif
-+			make_lowmem_page_writable(
-+				phys_to_virt(pfn << PAGE_SHIFT), feature);
-+	}
-+}
++	start_address = address;
 +
-+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
-+{
-+	if (xen_feature(feature))
-+		return;
++	flush_cache_all();
 +
-+	while (nr-- != 0) {
-+		make_page_readonly(va, feature);
-+		va = (void *)((unsigned long)va + PAGE_SIZE);
-+	}
-+}
++	for (i = 0; i < size; i += PAGE_SIZE) {
++		if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
++			/* Flush a full batch after filling in the PTE ptrs. */
++			rc = apply_to_page_range(mm, start_address, 
++						 address - start_address,
++						 direct_remap_area_pte_fn, &w);
++			if (rc)
++				goto out;
++			rc = -EFAULT;
++			if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
++				goto out;
++			v = w = u;
++			start_address = address;
++		}
 +
-+void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
-+{
-+	if (xen_feature(feature))
-+		return;
++		/*
++		 * Fill in the machine address now; the PTE pointer is filled
++		 * in later by direct_remap_area_pte_fn() when the batch is
++		 * applied via apply_to_page_range().
++		 */
++		v->val = pte_val_ma(pfn_pte_ma(mfn, prot));
 +
-+	while (nr-- != 0) {
-+		make_page_writable(va, feature);
-+		va = (void *)((unsigned long)va + PAGE_SIZE);
++		mfn++;
++		address += PAGE_SIZE; 
++		v++;
 +	}
-+}
 +
-+static inline void pgd_walk_set_prot(void *pt, pgprot_t flags)
-+{
-+	struct page *page = virt_to_page(pt);
-+	unsigned long pfn = page_to_pfn(page);
++	if (v != u) {
++		/* Final batch. */
++		rc = apply_to_page_range(mm, start_address,
++					 address - start_address,
++					 direct_remap_area_pte_fn, &w);
++		if (rc)
++			goto out;
++		rc = -EFAULT;
++		if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
++			goto out;
++	}
 +
-+	if (PageHighMem(page))
-+		return;
-+	BUG_ON(HYPERVISOR_update_va_mapping(
-+		(unsigned long)__va(pfn << PAGE_SHIFT),
-+		pfn_pte(pfn, flags), 0));
++	rc = 0;
++
++ out:
++	flush_tlb_all();
++
++	free_page((unsigned long)u);
++
++	return rc;
 +}
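
The loop above batches PTE updates one page of mmu_update_t records at a time (4096/16 = 256 entries), flushing each full batch through apply_to_page_range() and a single HYPERVISOR_mmu_update hypercall, then issuing a final partial batch. A generic sketch of that batch-and-flush shape; apply_batch() is a hypothetical stand-in for the hypercall:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096

struct update { uint64_t ptr, val; };	/* mirrors mmu_update_t's layout */

static void apply_batch(struct update *u, int n)
{
	printf("flushing batch of %d updates\n", n);
}

int main(void)
{
	static struct update buf[PAGE_SIZE / sizeof(struct update)];
	int fill = 0, i;

	for (i = 0; i < 1000; i++) {	/* hypothetical 1000 PTE updates */
		if (fill == PAGE_SIZE / sizeof(struct update)) {
			apply_batch(buf, fill);	/* full batch: flush */
			fill = 0;
		}
		buf[fill].ptr = i;
		buf[fill].val = i;
		fill++;
	}
	if (fill)
		apply_batch(buf, fill);	/* final partial batch */
	return 0;
}
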
 +
-+static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++			   unsigned long address, 
++			   unsigned long mfn,
++			   unsigned long size, 
++			   pgprot_t prot,
++			   domid_t  domid)
 +{
-+	pgd_t *pgd = pgd_base;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	int    g, u, m;
-+
 +	if (xen_feature(XENFEAT_auto_translated_physmap))
-+		return;
++		return remap_pfn_range(vma, address, mfn, size, prot);
 +
-+	for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
-+		if (pgd_none(*pgd))
-+			continue;
-+		pud = pud_offset(pgd, 0);
-+		if (PTRS_PER_PUD > 1) /* not folded */
-+			pgd_walk_set_prot(pud,flags);
-+		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
-+			if (pud_none(*pud))
-+				continue;
-+			pmd = pmd_offset(pud, 0);
-+			if (PTRS_PER_PMD > 1) /* not folded */
-+				pgd_walk_set_prot(pmd,flags);
-+			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
-+				if (pmd_none(*pmd))
-+					continue;
-+				pte = pte_offset_kernel(pmd,0);
-+				pgd_walk_set_prot(pte,flags);
-+			}
-+		}
-+	}
++	if (domid == DOMID_SELF)
++		return -EINVAL;
 +
-+	BUG_ON(HYPERVISOR_update_va_mapping(
-+		(unsigned long)pgd_base,
-+		pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
-+		UVMF_TLB_FLUSH));
-+}
++	vma->vm_flags |= VM_IO | VM_RESERVED;
 +
-+static void __pgd_pin(pgd_t *pgd)
-+{
-+	pgd_walk(pgd, PAGE_KERNEL_RO);
-+	xen_pgd_pin(__pa(pgd));
-+	set_bit(PG_pinned, &virt_to_page(pgd)->flags);
-+}
++	vma->vm_mm->context.has_foreign_mappings = 1;
 +
-+static void __pgd_unpin(pgd_t *pgd)
-+{
-+	xen_pgd_unpin(__pa(pgd));
-+	pgd_walk(pgd, PAGE_KERNEL);
-+	clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
++	return __direct_remap_pfn_range(
++		vma->vm_mm, address, mfn, size, prot, domid);
 +}
++EXPORT_SYMBOL(direct_remap_pfn_range);
 +
-+static void pgd_test_and_unpin(pgd_t *pgd)
++int direct_kernel_remap_pfn_range(unsigned long address, 
++				  unsigned long mfn,
++				  unsigned long size, 
++				  pgprot_t prot,
++				  domid_t  domid)
 +{
-+	if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
-+		__pgd_unpin(pgd);
++	return __direct_remap_pfn_range(
++		&init_mm, address, mfn, size, prot, domid);
 +}
++EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
 +
-+void mm_pin(struct mm_struct *mm)
++static int lookup_pte_fn(
++	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
 +{
-+	if (xen_feature(XENFEAT_writable_page_tables))
-+		return;
-+	spin_lock(&mm->page_table_lock);
-+	__pgd_pin(mm->pgd);
-+	spin_unlock(&mm->page_table_lock);
++	uint64_t *ptep = (uint64_t *)data;
++	if (ptep)
++		*ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pmd_page)) <<
++			 PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
++	return 0;
 +}
 +
-+void mm_unpin(struct mm_struct *mm)
++int create_lookup_pte_addr(struct mm_struct *mm, 
++			   unsigned long address,
++			   uint64_t *ptep)
 +{
-+	if (xen_feature(XENFEAT_writable_page_tables))
-+		return;
-+	spin_lock(&mm->page_table_lock);
-+	__pgd_unpin(mm->pgd);
-+	spin_unlock(&mm->page_table_lock);
++	return apply_to_page_range(mm, address, PAGE_SIZE,
++				   lookup_pte_fn, ptep);
 +}
 +
-+void mm_pin_all(void)
-+{
-+	struct page *page;
-+
-+	/* Only pgds on the pgd_list please: none hidden in the slab cache. */
-+	kmem_cache_shrink(pgd_cache);
-+
-+	if (xen_feature(XENFEAT_writable_page_tables))
-+		return;
-+
-+	for (page = pgd_list; page; page = (struct page *)page->index) {
-+		if (!test_bit(PG_pinned, &page->flags))
-+			__pgd_pin((pgd_t *)page_address(page));
-+	}
-+}
++EXPORT_SYMBOL(create_lookup_pte_addr);
 +
-+void _arch_dup_mmap(struct mm_struct *mm)
++static int noop_fn(
++	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
 +{
-+	if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
-+		mm_pin(mm);
++	return 0;
 +}
 +
-+void _arch_exit_mmap(struct mm_struct *mm)
++int touch_pte_range(struct mm_struct *mm,
++		    unsigned long address,
++		    unsigned long size)
 +{
-+	struct task_struct *tsk = current;
-+
-+	task_lock(tsk);
-+
-+	/*
-+	 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-+	 * *much* faster this way, as no tlb flushes means bigger wrpt batches.
-+	 */
-+	if (tsk->active_mm == mm) {
-+		tsk->active_mm = &init_mm;
-+		atomic_inc(&init_mm.mm_count);
++	return apply_to_page_range(mm, address, size, noop_fn, NULL);
++} 
 +
-+		switch_mm(mm, &init_mm, tsk);
++EXPORT_SYMBOL(touch_pte_range);
 +
-+		atomic_dec(&mm->mm_count);
-+		BUG_ON(atomic_read(&mm->mm_count) == 0);
-+	}
++/*
++ * Does @address reside within a non-highmem page that is local to this virtual
++ * machine (i.e., not an I/O page, nor a memory page belonging to another VM)?
++ * See the comment that accompanies mfn_to_local_pfn() in page.h to understand
++ * why this works.
++ */
++static inline int is_local_lowmem(unsigned long address)
++{
++	extern unsigned long max_low_pfn;
++	return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn);
++}
 +
-+	task_unlock(tsk);
++/*
++ * Generic mapping function (not visible outside):
++ */
 +
-+	if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
-+	    (atomic_read(&mm->mm_count) == 1) &&
-+	    !mm->context.has_foreign_mappings)
-+		mm_unpin(mm);
-+}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/oprofile/Makefile linux-2.6.18-xen/arch/i386/oprofile/Makefile
---- linux-2.6.18.3/arch/i386/oprofile/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/oprofile/Makefile	2006-11-19 14:26:22.000000000 +0100
-@@ -6,7 +6,11 @@
- 		oprofilefs.o oprofile_stats.o  \
- 		timer_int.o )
- 
-+ifdef CONFIG_XEN
-+oprofile-y				:= $(DRIVER_OBJS) xenoprof.o
-+else 
- oprofile-y				:= $(DRIVER_OBJS) init.o backtrace.o
- oprofile-$(CONFIG_X86_LOCAL_APIC) 	+= nmi_int.o op_model_athlon.o \
- 					   op_model_ppro.o op_model_p4.o
- oprofile-$(CONFIG_X86_IO_APIC)		+= nmi_timer_int.o
-+endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/oprofile/xenoprof.c linux-2.6.18-xen/arch/i386/oprofile/xenoprof.c
---- linux-2.6.18.3/arch/i386/oprofile/xenoprof.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/oprofile/xenoprof.c	2006-11-19 14:26:22.000000000 +0100
-@@ -0,0 +1,584 @@
-+/**
-+ * @file xenoprof.c
-+ *
-+ * @remark Copyright 2002 OProfile authors
-+ * @remark Read the file COPYING
-+ *
-+ * @author John Levon <levon at movementarian.org>
++/*
++ * Remap an arbitrary physical address space into the kernel virtual
++ * address space. Needed when the kernel wants to access high addresses
++ * directly.
 + *
-+ * Modified by Aravind Menon and Jose Renato Santos for Xen
-+ * These modifications are:
-+ * Copyright (C) 2005 Hewlett-Packard Co.
++ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
++ * have to convert them into an offset in a page-aligned mapping, but the
++ * caller shouldn't need to know that small detail.
 + */
++void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
++{
++	void __iomem * addr;
++	struct vm_struct * area;
++	unsigned long offset, last_addr;
++	domid_t domid = DOMID_IO;
 +
-+#include <linux/init.h>
-+#include <linux/notifier.h>
-+#include <linux/smp.h>
-+#include <linux/oprofile.h>
-+#include <linux/sysdev.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/vmalloc.h>
-+#include <asm/nmi.h>
-+#include <asm/msr.h>
-+#include <asm/apic.h>
-+#include <asm/pgtable.h>
-+#include <xen/evtchn.h>
-+#include "op_counter.h"
-+
-+#include <xen/driver_util.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/xenoprof.h>
-+#include <../../../drivers/oprofile/cpu_buffer.h>
-+#include <../../../drivers/oprofile/event_buffer.h>
-+
-+#define MAX_XENOPROF_SAMPLES 16
++	/* Don't allow wraparound or zero size */
++	last_addr = phys_addr + size - 1;
++	if (!size || last_addr < phys_addr)
++		return NULL;
 +
-+static int xenoprof_start(void);
-+static void xenoprof_stop(void);
++	/*
++	 * Don't remap the low PCI/ISA area, it's always mapped..
++	 */
++	if (is_initial_xendomain() &&
++	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++		return (void __iomem *) isa_bus_to_virt(phys_addr);
 +
-+static int xenoprof_enabled = 0;
-+static unsigned int num_events = 0;
-+static int is_primary = 0;
-+static int active_defined;
++	/*
++	 * Don't allow anybody to remap normal RAM that we're using..
++	 */
++	if (is_local_lowmem(phys_addr)) {
++		char *t_addr, *t_end;
++		struct page *page;
 +
-+/* sample buffers shared with Xen */
-+xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
-+/* Shared buffer area */
-+char * shared_buffer = NULL;
-+/* Number of buffers in shared area (one per VCPU) */
-+int nbuf;
-+/* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
-+int ovf_irq[NR_CPUS];
-+/* cpu model type string - copied from Xen memory space on XENOPROF_init command */
-+char cpu_type[XENOPROF_CPU_TYPE_SIZE];
++		t_addr = bus_to_virt(phys_addr);
++		t_end = t_addr + (size - 1);
++
++		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
++			if (!PageReserved(page))
++				return NULL;
 +
-+/* Passive sample buffers shared with Xen */
-+xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
-+/* Passive shared buffer area */
-+char *p_shared_buffer[MAX_OPROF_DOMAINS];
++		domid = DOMID_SELF;
++	}
 +
-+#ifdef CONFIG_PM
++	/*
++	 * Mappings have to be page-aligned
++	 */
++	offset = phys_addr & ~PAGE_MASK;
++	phys_addr &= PAGE_MASK;
++	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 +
-+static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
-+{
-+	if (xenoprof_enabled == 1)
-+		xenoprof_stop();
-+	return 0;
++	/*
++	 * Ok, go for it..
++	 */
++	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
++	if (!area)
++		return NULL;
++	area->phys_addr = phys_addr;
++	addr = (void __iomem *) area->addr;
++	flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
++	if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
++				     phys_addr>>PAGE_SHIFT,
++				     size, __pgprot(flags), domid)) {
++		vunmap((void __force *) addr);
++		return NULL;
++	}
++	return (void __iomem *) (offset + (char __iomem *)addr);
 +}
++EXPORT_SYMBOL(__ioremap);
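
__ioremap() accepts unaligned requests by saving the sub-page offset, page-aligning the physical range, and adding the offset back to the returned cookie. A worked sketch of that bookkeeping, with hypothetical MMIO values:

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long phys_addr = 0xfebc1234UL;	/* unaligned MMIO address */
	unsigned long size = 0x100UL;
	unsigned long last_addr = phys_addr + size - 1;
	unsigned long offset = phys_addr & ~PAGE_MASK;

	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
	printf("map %#lx for %#lx bytes, return base+%#lx\n",
	       phys_addr, size, offset);	/* 0xfebc1000, 0x1000, +0x234 */
	return 0;
}
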
 +
++/**
++ * ioremap_nocache     -   map bus memory into CPU space
++ * @offset:    bus address of the memory
++ * @size:      size of the resource to map
++ *
++ * ioremap_nocache performs a platform specific sequence of operations to
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
++ * writew/writel functions and the other mmio helpers. The returned
++ * address is not guaranteed to be usable directly as a virtual
++ * address. 
++ *
++ * This version of ioremap ensures that the memory is marked uncachable
++ * on the CPU as well as honouring existing caching rules from things like
++ * the PCI bus. Note that there are other caches and buffers on many 
++ * busses. In particular driver authors should read up on PCI writes
++ *
++ * It's useful if some control registers are in such an area and
++ * write combining or read caching is not desirable:
++ * 
++ * Must be freed with iounmap.
++ */
 +
-+static int xenoprof_resume(struct sys_device * dev)
++void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 +{
-+	if (xenoprof_enabled == 1)
-+		xenoprof_start();
-+	return 0;
-+}
-+
++	unsigned long last_addr;
++	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
++	if (!p) 
++		return p; 
 +
-+static struct sysdev_class oprofile_sysclass = {
-+	set_kset_name("oprofile"),
-+	.resume		= xenoprof_resume,
-+	.suspend	= xenoprof_suspend
-+};
++	/* Guaranteed to be > phys_addr, as per __ioremap() */
++	last_addr = phys_addr + size - 1;
 +
++	if (is_local_lowmem(last_addr)) { 
++		struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
++		unsigned long npages;
 +
-+static struct sys_device device_oprofile = {
-+	.id	= 0,
-+	.cls	= &oprofile_sysclass,
-+};
++		phys_addr &= PAGE_MASK;
 +
++		/* This might overflow and become zero.. */
++		last_addr = PAGE_ALIGN(last_addr);
 +
-+static int __init init_driverfs(void)
-+{
-+	int error;
-+	if (!(error = sysdev_class_register(&oprofile_sysclass)))
-+		error = sysdev_register(&device_oprofile);
-+	return error;
-+}
++		/* .. but that's ok, because modulo-2**n arithmetic will make
++		 * the page-aligned "last - first" come out right.
++		 */
++		npages = (last_addr - phys_addr) >> PAGE_SHIFT;
 +
++		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
++			iounmap(p); 
++			p = NULL;
++		}
++		global_flush_tlb();
++	}
 +
-+static void __exit exit_driverfs(void)
-+{
-+	sysdev_unregister(&device_oprofile);
-+	sysdev_class_unregister(&oprofile_sysclass);
++	return p;					
 +}
++EXPORT_SYMBOL(ioremap_nocache);
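
A driver-style usage sketch for ioremap_nocache()/iounmap(); the BAR address, size, and register offset are hypothetical, and error handling is reduced to the essentials:

/* Hypothetical MMIO probe: map uncached, read a register, unmap. */
#include <linux/errno.h>
#include <linux/io.h>

#define DEV_MMIO_BASE	0xfebc0000UL	/* hypothetical device BAR */
#define DEV_MMIO_SIZE	0x1000UL
#define DEV_STATUS_REG	0x04		/* hypothetical register offset */

static int dev_probe_sketch(void)
{
	void __iomem *regs = ioremap_nocache(DEV_MMIO_BASE, DEV_MMIO_SIZE);
	unsigned int status;

	if (!regs)
		return -ENOMEM;
	status = readl(regs + DEV_STATUS_REG);	/* uncached MMIO read */
	iounmap(regs);				/* must be freed with iounmap */
	return status ? 0 : -ENODEV;
}
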
 +
-+#else
-+#define init_driverfs() do { } while (0)
-+#define exit_driverfs() do { } while (0)
-+#endif /* CONFIG_PM */
-+
-+unsigned long long oprofile_samples = 0;
-+unsigned long long p_oprofile_samples = 0;
++/**
++ * iounmap - Free an IO remapping
++ * @addr: virtual address from ioremap_*
++ *
++ * Caller must ensure there is only one unmapping for the same pointer.
++ */
++void iounmap(volatile void __iomem *addr)
++{
++	struct vm_struct *p, *o;
 +
-+unsigned int pdomains;
-+struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
++	if ((void __force *)addr <= high_memory)
++		return;
 +
-+static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
-+{
-+	int head, tail, size;
++	/*
++	 * __ioremap special-cases the PCI/ISA range by not instantiating a
++	 * vm_area and by simply returning an address into the kernel mapping
++	 * of ISA space.   So handle that here.
++	 */
++	if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
++		return;
 +
-+	head = buf->event_head;
-+	tail = buf->event_tail;
-+	size = buf->event_size;
++	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
 +
-+	if (tail > head) {
-+		while (tail < size) {
-+			oprofile_add_pc(buf->event_log[tail].eip,
-+					buf->event_log[tail].mode,
-+					buf->event_log[tail].event);
-+			if (!is_passive)
-+				oprofile_samples++;
-+			else
-+				p_oprofile_samples++;
-+			tail++;
-+		}
-+		tail = 0;
++	/* Use the vm area unlocked, assuming the caller
++	   ensures there isn't another iounmap for the same address
++	   in parallel. Reuse of the virtual address is prevented by
++	   leaving it in the global lists until we're done with it.
++	   cpa takes care of the direct mappings. */
++	read_lock(&vmlist_lock);
++	for (p = vmlist; p; p = p->next) {
++		if (p->addr == addr)
++			break;
 +	}
-+	while (tail < head) {
-+		oprofile_add_pc(buf->event_log[tail].eip,
-+				buf->event_log[tail].mode,
-+				buf->event_log[tail].event);
-+		if (!is_passive)
-+			oprofile_samples++;
-+		else
-+			p_oprofile_samples++;
-+		tail++;
++	read_unlock(&vmlist_lock);
++
++	if (!p) {
++		printk("iounmap: bad address %p\n", addr);
++		dump_stack();
++		return;
 +	}
 +
-+	buf->event_tail = tail;
-+}
++	/* Reset the direct mapping. Can block */
++	if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
++		/* p->size includes the guard page, but cpa doesn't like that */
++		change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
++				 (p->size - PAGE_SIZE) >> PAGE_SHIFT,
++				 PAGE_KERNEL);
++		global_flush_tlb();
++	} 
 +
-+static void xenoprof_handle_passive(void)
-+{
-+	int i, j;
-+	int flag_domain, flag_switch = 0;
-+	
-+	for (i = 0; i < pdomains; i++) {
-+		flag_domain = 0;
-+		for (j = 0; j < passive_domains[i].nbuf; j++) {
-+			xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
-+			if (buf->event_head == buf->event_tail)
-+				continue;
-+			if (!flag_domain) {
-+				if (!oprofile_add_domain_switch(passive_domains[i].
-+								domain_id))
-+					goto done;
-+				flag_domain = 1;
-+			}
-+			xenoprof_add_pc(buf, 1);
-+			flag_switch = 1;
-+		}
-+	}
-+done:
-+	if (flag_switch)
-+		oprofile_add_domain_switch(COORDINATOR_DOMAIN);
++	/* Finally remove it */
++	o = remove_vm_area((void *)addr);
++	BUG_ON(p != o || o == NULL);
++	kfree(p); 
 +}
++EXPORT_SYMBOL(iounmap);
 +
-+static irqreturn_t 
-+xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
++void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
 +{
-+	struct xenoprof_buf * buf;
-+	int cpu;
-+	static unsigned long flag;
-+
-+	cpu = smp_processor_id();
-+	buf = xenoprof_buf[cpu];
-+
-+	xenoprof_add_pc(buf, 0);
++	unsigned long offset, last_addr;
++	unsigned int nrpages;
++	enum fixed_addresses idx;
 +
-+	if (is_primary && !test_and_set_bit(0, &flag)) {
-+		xenoprof_handle_passive();
-+		smp_mb__before_clear_bit();
-+		clear_bit(0, &flag);
-+	}
++	/* Don't allow wraparound or zero size */
++	last_addr = phys_addr + size - 1;
++	if (!size || last_addr < phys_addr)
++		return NULL;
 +
-+	return IRQ_HANDLED;
-+}
++	/*
++	 * Don't remap the low PCI/ISA area, it's always mapped..
++	 */
++	if (is_initial_xendomain() &&
++	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++		return isa_bus_to_virt(phys_addr);
 +
++	/*
++	 * Mappings have to be page-aligned
++	 */
++	offset = phys_addr & ~PAGE_MASK;
++	phys_addr &= PAGE_MASK;
++	size = PAGE_ALIGN(last_addr) - phys_addr;
 +
-+static void unbind_virq(void)
-+{
-+	int i;
++	/*
++	 * Mappings have to fit in the FIX_BTMAP area.
++	 */
++	nrpages = size >> PAGE_SHIFT;
++	if (nrpages > NR_FIX_BTMAPS)
++		return NULL;
 +
-+	for_each_possible_cpu(i) {
-+		if (ovf_irq[i] >= 0) {
-+			unbind_from_irqhandler(ovf_irq[i], NULL);
-+			ovf_irq[i] = -1;
-+		}
++	/*
++	 * Ok, go for it..
++	 */
++	idx = FIX_BTMAP_BEGIN;
++	while (nrpages > 0) {
++		set_fixmap(idx, phys_addr);
++		phys_addr += PAGE_SIZE;
++		--idx;
++		--nrpages;
 +	}
++	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
 +}
 +
-+
-+static int bind_virq(void)
++void __init bt_iounmap(void *addr, unsigned long size)
 +{
-+	int i, result;
-+
-+	for_each_possible_cpu(i) {
-+		result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
-+						 i,
-+						 xenoprof_ovf_interrupt,
-+						 SA_INTERRUPT,
-+						 "xenoprof",
-+						 NULL);
++	unsigned long virt_addr;
++	unsigned long offset;
++	unsigned int nrpages;
++	enum fixed_addresses idx;
 +
-+		if (result < 0) {
-+			unbind_virq();
-+			return result;
-+		}
++	virt_addr = (unsigned long)addr;
++	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
++		return;
++	if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
++		return;
++	offset = virt_addr & ~PAGE_MASK;
++	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
 +
-+		ovf_irq[i] = result;
++	idx = FIX_BTMAP_BEGIN;
++	while (nrpages > 0) {
++		clear_fixmap(idx);
++		--idx;
++		--nrpages;
 +	}
-+		
-+	return 0;
 +}
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/mm/Makefile b/arch/i386/mm/Makefile
+--- a/arch/i386/mm/Makefile	2007-03-12 21:58:05.000000000 +0100
++++ b/arch/i386/mm/Makefile	2007-03-14 10:55:14.000000000 +0100
+@@ -8,3 +8,11 @@
+ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+ obj-$(CONFIG_HIGHMEM) += highmem.o
+ obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
 +
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
 +
-+static int map_xenoprof_buffer(int max_samples)
-+{
-+	struct xenoprof_get_buffer get_buffer;
-+	struct xenoprof_buf *buf;
-+	int npages, ret, i;
-+	struct vm_struct *area;
++obj-y		+= hypervisor.o
 +
-+	if ( shared_buffer )
-+		return 0;
++obj-y := $(call cherrypickxen, $(obj-y))
++endif
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
+--- a/arch/i386/mm/pageattr.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/mm/pageattr.c	2007-03-14 10:55:14.000000000 +0100
+@@ -90,7 +90,7 @@
+ 	unsigned long flags;
+ 
+ 	set_pte_atomic(kpte, pte); 	/* change init_mm */
+-	if (PTRS_PER_PMD > 1)
++	if (HAVE_SHARED_KERNEL_PMD)
+ 		return;
+ 
+ 	spin_lock_irqsave(&pgd_lock, flags);
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/mm/pgtable-xen.c b/arch/i386/mm/pgtable-xen.c
+--- a/arch/i386/mm/pgtable-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/mm/pgtable-xen.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,724 @@
++/*
++ *  linux/arch/i386/mm/pgtable.c
++ */
 +
-+	get_buffer.max_samples = max_samples;
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/highmem.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/spinlock.h>
++#include <linux/module.h>
 +
-+	if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, &get_buffer)) )
-+		return ret;
++#include <asm/system.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
 +
-+	nbuf = get_buffer.nbuf;
-+	npages = (get_buffer.bufsize * nbuf - 1) / PAGE_SIZE + 1;
++#include <xen/features.h>
++#include <xen/foreign_page.h>
++#include <asm/hypervisor.h>
 +
-+	area = alloc_vm_area(npages * PAGE_SIZE);
-+	if (area == NULL)
-+		return -ENOMEM;
++static void pgd_test_and_unpin(pgd_t *pgd);
 +
-+	if ( (ret = direct_kernel_remap_pfn_range(
-+		      (unsigned long)area->addr,
-+		      get_buffer.buf_maddr >> PAGE_SHIFT,
-+		      npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE), DOMID_SELF)) ) {
-+		vunmap(area->addr);
-+		return ret;
-+	}
++void show_mem(void)
++{
++	int total = 0, reserved = 0;
++	int shared = 0, cached = 0;
++	int highmem = 0;
++	struct page *page;
++	pg_data_t *pgdat;
++	unsigned long i;
++	unsigned long flags;
 +
-+	shared_buffer = area->addr;
-+	for (i=0; i< nbuf; i++) {
-+		buf = (struct xenoprof_buf*) 
-+			&shared_buffer[i * get_buffer.bufsize];
-+		BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
-+		xenoprof_buf[buf->vcpu_id] = buf;
++	printk(KERN_INFO "Mem-info:\n");
++	show_free_areas();
++	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++	for_each_online_pgdat(pgdat) {
++		pgdat_resize_lock(pgdat, &flags);
++		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++			page = pgdat_page_nr(pgdat, i);
++			total++;
++			if (PageHighMem(page))
++				highmem++;
++			if (PageReserved(page))
++				reserved++;
++			else if (PageSwapCache(page))
++				cached++;
++			else if (page_count(page))
++				shared += page_count(page) - 1;
++		}
++		pgdat_resize_unlock(pgdat, &flags);
 +	}
++	printk(KERN_INFO "%d pages of RAM\n", total);
++	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
++	printk(KERN_INFO "%d reserved pages\n", reserved);
++	printk(KERN_INFO "%d pages shared\n", shared);
++	printk(KERN_INFO "%d pages swap cached\n", cached);
 +
-+	return 0;
++	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
++	printk(KERN_INFO "%lu pages writeback\n",
++					global_page_state(NR_WRITEBACK));
++	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
++	printk(KERN_INFO "%lu pages slab\n",
++		global_page_state(NR_SLAB_RECLAIMABLE) +
++		global_page_state(NR_SLAB_UNRECLAIMABLE));
++	printk(KERN_INFO "%lu pages pagetables\n",
++					global_page_state(NR_PAGETABLE));
 +}
 +
-+
-+static int xenoprof_setup(void)
++/*
++ * Associate a virtual page frame with a given physical page frame 
++ * and protection flags for that frame.
++ */ 
++static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 +{
-+	int ret;
-+	int i;
-+
-+	if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
-+		return ret;
-+
-+	if ( (ret = bind_virq()) )
-+		return ret;
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
 +
-+	if (is_primary) {
-+		struct xenoprof_counter counter;
++	pgd = swapper_pg_dir + pgd_index(vaddr);
++	if (pgd_none(*pgd)) {
++		BUG();
++		return;
++	}
++	pud = pud_offset(pgd, vaddr);
++	if (pud_none(*pud)) {
++		BUG();
++		return;
++	}
++	pmd = pmd_offset(pud, vaddr);
++	if (pmd_none(*pmd)) {
++		BUG();
++		return;
++	}
++	pte = pte_offset_kernel(pmd, vaddr);
++	if (pgprot_val(flags))
++		/* <pfn,flags> stored as-is, to permit clearing entries */
++		set_pte(pte, pfn_pte(pfn, flags));
++	else
++		pte_clear(&init_mm, vaddr, pte);
 +
-+		/* Define dom0 as an active domain if not done yet */
-+		if (!active_defined) {
-+			domid_t domid;
-+			ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
-+			if (ret)
-+				goto err;
-+			domid = 0;
-+			ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
-+			if (ret)
-+				goto err;
-+			active_defined = 1;
-+		}
++	/*
++	 * It's enough to flush this one mapping.
++	 * (PGE mappings get flushed as well)
++	 */
++	__flush_tlb_one(vaddr);
++}
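
The walk in set_pte_pfn() slices successive bit fields out of the virtual address to index each table level (pud and pmd are folded on non-PAE i386). A small sketch of the index arithmetic using the classic non-PAE shifts (an assumption; PAE uses different field widths):

#include <stdio.h>

#define PAGE_SHIFT   12
#define PGDIR_SHIFT  22		/* non-PAE i386: 10-bit pgd index */
#define PTRS_PER_PTE 1024

int main(void)
{
	unsigned long vaddr = 0xc0123456UL;	/* hypothetical kernel address */

	printf("pgd index %lu, pte index %lu, page offset %#lx\n",
	       vaddr >> PGDIR_SHIFT,			/* 768 */
	       (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1),	/* 291 */
	       vaddr & ((1UL << PAGE_SHIFT) - 1));	/* 0x456 */
	return 0;
}
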
 +
-+		ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
-+		if (ret)
-+			goto err;
-+		for (i=0; i<num_events; i++) {
-+			counter.ind       = i;
-+			counter.count     = (uint64_t)counter_config[i].count;
-+			counter.enabled   = (uint32_t)counter_config[i].enabled;
-+			counter.event     = (uint32_t)counter_config[i].event;
-+			counter.kernel    = (uint32_t)counter_config[i].kernel;
-+			counter.user      = (uint32_t)counter_config[i].user;
-+			counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
-+			HYPERVISOR_xenoprof_op(XENOPROF_counter, 
-+					       &counter);
-+		}
-+		ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
++/*
++ * Associate a virtual page frame with a given physical page frame 
++ * and protection flags for that frame.
++ */ 
++static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
++			   pgprot_t flags)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
 +
-+		if (ret)
-+			goto err;
++	pgd = swapper_pg_dir + pgd_index(vaddr);
++	if (pgd_none(*pgd)) {
++		BUG();
++		return;
 +	}
++	pud = pud_offset(pgd, vaddr);
++	if (pud_none(*pud)) {
++		BUG();
++		return;
++	}
++	pmd = pmd_offset(pud, vaddr);
++	if (pmd_none(*pmd)) {
++		BUG();
++		return;
++	}
++	pte = pte_offset_kernel(pmd, vaddr);
++	/* <pfn,flags> stored as-is, to permit clearing entries */
++	set_pte(pte, pfn_pte_ma(pfn, flags));
 +
-+	ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
-+	if (ret)
-+		goto err;
-+
-+	xenoprof_enabled = 1;
-+	return 0;
-+ err:
-+	unbind_virq();
-+	return ret;
++	/*
++	 * It's enough to flush this one mapping.
++	 * (PGE mappings get flushed as well)
++	 */
++	__flush_tlb_one(vaddr);
 +}
 +
-+
-+static void xenoprof_shutdown(void)
++/*
++ * Associate a large virtual page frame with a given physical page frame 
++ * and protection flags for that frame. pfn is for the base of the page,
++ * vaddr is what the page gets mapped to - both must be properly aligned. 
++ * The pmd must already be instantiated. Assumes PAE mode.
++ */ 
++void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 +{
-+	xenoprof_enabled = 0;
-+
-+	HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
 +
-+	if (is_primary) {
-+		HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);
-+		active_defined = 0;
++	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
++		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
++		return; /* BUG(); */
 +	}
-+
-+	unbind_virq();
-+
++	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
++		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
++		return; /* BUG(); */
++	}
++	pgd = swapper_pg_dir + pgd_index(vaddr);
++	if (pgd_none(*pgd)) {
++		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
++		return; /* BUG(); */
++	}
++	pud = pud_offset(pgd, vaddr);
++	pmd = pmd_offset(pud, vaddr);
++	set_pmd(pmd, pfn_pmd(pfn, flags));
++	/*
++	 * It's enough to flush this one mapping.
++	 * (PGE mappings get flushed as well)
++	 */
++	__flush_tlb_one(vaddr);
 +}
 +
++static int fixmaps;
++#ifndef CONFIG_COMPAT_VDSO
++unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
++EXPORT_SYMBOL(__FIXADDR_TOP);
++#endif
 +
-+static int xenoprof_start(void)
++void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
 +{
-+	int ret = 0;
-+
-+	if (is_primary)
-+		ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
++	unsigned long address = __fix_to_virt(idx);
 +
-+	return ret;
++	if (idx >= __end_of_fixed_addresses) {
++		BUG();
++		return;
++	}
++	switch (idx) {
++	case FIX_WP_TEST:
++#ifdef CONFIG_X86_F00F_BUG
++	case FIX_F00F_IDT:
++#endif
++		set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
++		break;
++	default:
++		set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
++		break;
++	}
++	fixmaps++;
 +}
 +
-+
-+static void xenoprof_stop(void)
++/**
++ * reserve_top_address - reserves a hole in the top of kernel address space
++ * @reserve - size of hole to reserve
++ *
++ * Can be used to relocate the fixmap area and poke a hole in the top
++ * of kernel address space to make room for a hypervisor.
++ */
++void reserve_top_address(unsigned long reserve)
 +{
-+	if (is_primary)
-+		HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
++	BUG_ON(fixmaps > 0);
++#ifdef CONFIG_COMPAT_VDSO
++	BUG_ON(reserve != 0);
++#else
++	__FIXADDR_TOP = -reserve - PAGE_SIZE;
++	__VMALLOC_RESERVE += reserve;
++#endif
 +}
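
The "-reserve - PAGE_SIZE" expression relies on 32-bit modular arithmetic: negating the hole size lands just below the 4 GiB boundary. A worked example, forced to 32-bit types so it behaves identically on a 64-bit host:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

int main(void)
{
	uint32_t reserve = 64u << 20;	/* hypothetical 64 MiB hole */
	uint32_t fixaddr_top = (uint32_t)0 - reserve - PAGE_SIZE;

	printf("__FIXADDR_TOP = %#x\n", fixaddr_top);	/* 0xfbfff000 */
	return 0;
}
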
 +
-+
-+static int xenoprof_set_active(int * active_domains,
-+			       unsigned int adomains)
++void set_fixaddr_top(unsigned long top)
 +{
-+	int ret = 0;
-+	int i;
-+	int set_dom0 = 0;
-+	domid_t domid;
-+
-+	if (!is_primary)
-+		return 0;
++	BUG_ON(fixmaps > 0);
++	__FIXADDR_TOP = top - PAGE_SIZE;
++}
 +
-+	if (adomains > MAX_OPROF_DOMAINS)
-+		return -E2BIG;
++pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
++{
++	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
++	if (pte)
++		make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
++	return pte;
++}
 +
-+	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
-+	if (ret)
-+		return ret;
++struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++{
++	struct page *pte;
 +
-+	for (i=0; i<adomains; i++) {
-+		domid = active_domains[i];
-+		if (domid != active_domains[i]) {
-+			ret = -EINVAL;
-+			goto out;
-+		}
-+		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
-+		if (ret)
-+			goto out;
-+		if (active_domains[i] == 0)
-+			set_dom0 = 1;
-+	}
-+	/* dom0 must always be active but may not be in the list */ 
-+	if (!set_dom0) {
-+		domid = 0;
-+		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
++#ifdef CONFIG_HIGHPTE
++	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
++#else
++	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
++	if (pte) {
++		SetPageForeign(pte, pte_free);
++		init_page_count(pte);
 +	}
-+
-+out:
-+	if (ret)
-+		HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
-+	active_defined = !ret;
-+	return ret;
++#endif
++	return pte;
 +}
 +
-+static int xenoprof_set_passive(int * p_domains,
-+                                unsigned int pdoms)
++void pte_free(struct page *pte)
 +{
-+	int ret;
-+	int i, j;
-+	int npages;
-+	struct xenoprof_buf *buf;
-+	struct vm_struct *area;
-+	pgprot_t prot = __pgprot(_KERNPG_TABLE);
-+
-+	if (!is_primary)
-+        	return 0;
-+
-+	if (pdoms > MAX_OPROF_DOMAINS)
-+		return -E2BIG;
-+
-+	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
-+	if (ret)
-+		return ret;
-+
-+	for (i = 0; i < pdoms; i++) {
-+		passive_domains[i].domain_id = p_domains[i];
-+		passive_domains[i].max_samples = 2048;
-+		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive,
-+					     &passive_domains[i]);
-+		if (ret)
-+			goto out;
-+
-+		npages = (passive_domains[i].bufsize * passive_domains[i].nbuf - 1) / PAGE_SIZE + 1;
-+
-+		area = alloc_vm_area(npages * PAGE_SIZE);
-+		if (area == NULL) {
-+			ret = -ENOMEM;
-+			goto out;
-+		}
-+
-+		ret = direct_kernel_remap_pfn_range(
-+			(unsigned long)area->addr,
-+			passive_domains[i].buf_maddr >> PAGE_SHIFT,
-+			npages * PAGE_SIZE, prot, DOMID_SELF);
-+		if (ret) {
-+			vunmap(area->addr);
-+			goto out;
-+		}
++	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
 +
-+		p_shared_buffer[i] = area->addr;
++	if (!pte_write(*virt_to_ptep(va)))
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
 +
-+		for (j = 0; j < passive_domains[i].nbuf; j++) {
-+			buf = (struct xenoprof_buf *)
-+				&p_shared_buffer[i][j * passive_domains[i].bufsize];
-+			BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
-+			p_xenoprof_buf[i][buf->vcpu_id] = buf;
-+		}
++	ClearPageForeign(pte);
++	init_page_count(pte);
 +
-+	}
++	__free_page(pte);
++}
 +
-+	pdomains = pdoms;
-+	return 0;
++void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
++{
++	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++}
 +
-+out:
-+	for (j = 0; j < i; j++) {
-+		vunmap(p_shared_buffer[j]);
-+		p_shared_buffer[j] = NULL;
-+	}
++/*
++ * List of all pgd's needed for non-PAE so it can invalidate entries
++ * in both cached and uncached pgd's; not needed for PAE since the
++ * kernel pmd is shared. If PAE were not to share the pmd a similar
++ * tactic would be needed. This is essentially codepath-based locking
++ * against pageattr.c; it is the unique case in which a valid change
++ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
++ * vmalloc faults work because attached pagetables are never freed.
++ * The locking scheme was chosen on the basis of manfred's
++ * recommendations and having no core impact whatsoever.
++ * -- wli
++ */
++DEFINE_SPINLOCK(pgd_lock);
++struct page *pgd_list;
 +
-+ 	return ret;
++static inline void pgd_list_add(pgd_t *pgd)
++{
++	struct page *page = virt_to_page(pgd);
++	page->index = (unsigned long)pgd_list;
++	if (pgd_list)
++		set_page_private(pgd_list, (unsigned long)&page->index);
++	pgd_list = page;
++	set_page_private(page, (unsigned long)&pgd_list);
 +}
 +
-+struct op_counter_config counter_config[OP_MAX_COUNTER];
++static inline void pgd_list_del(pgd_t *pgd)
++{
++	struct page *next, **pprev, *page = virt_to_page(pgd);
++	next = (struct page *)page->index;
++	pprev = (struct page **)page_private(page);
++	*pprev = next;
++	if (next)
++		set_page_private(next, (unsigned long)pprev);
++}
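
pgd_list_add()/pgd_list_del() form an intrusive singly linked list whose back-link points at the previous node's next slot, so deletion is O(1) with no head scan; page->index carries the forward pointer and page_private() the back-link. A userspace sketch of the same linkage:

#include <stdio.h>

struct node {
	struct node *next;
	struct node **pprev;	/* points at whatever points at us */
};

static struct node *list_head;

static void list_add_sketch(struct node *n)
{
	n->next = list_head;
	if (list_head)
		list_head->pprev = &n->next;
	list_head = n;
	n->pprev = &list_head;
}

static void list_del_sketch(struct node *n)
{
	*n->pprev = n->next;	/* unlink without walking the list */
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct node a, b;

	list_add_sketch(&a);
	list_add_sketch(&b);
	list_del_sketch(&a);
	printf("head is &b: %d\n", list_head == &b);	/* prints 1 */
	return 0;
}
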
 +
-+static int xenoprof_create_files(struct super_block * sb, struct dentry * root)
++void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 +{
-+	unsigned int i;
++	unsigned long flags;
 +
-+	for (i = 0; i < num_events; ++i) {
-+		struct dentry * dir;
-+		char buf[2];
-+ 
-+		snprintf(buf, 2, "%d", i);
-+		dir = oprofilefs_mkdir(sb, root, buf);
-+		oprofilefs_create_ulong(sb, dir, "enabled",
-+					&counter_config[i].enabled);
-+		oprofilefs_create_ulong(sb, dir, "event",
-+					&counter_config[i].event);
-+		oprofilefs_create_ulong(sb, dir, "count",
-+					&counter_config[i].count);
-+		oprofilefs_create_ulong(sb, dir, "unit_mask",
-+					&counter_config[i].unit_mask);
-+		oprofilefs_create_ulong(sb, dir, "kernel",
-+					&counter_config[i].kernel);
-+		oprofilefs_create_ulong(sb, dir, "user",
-+					&counter_config[i].user);
++	if (PTRS_PER_PMD > 1) {
++		if (HAVE_SHARED_KERNEL_PMD)
++			clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++					swapper_pg_dir + USER_PTRS_PER_PGD,
++					KERNEL_PGD_PTRS);
++	} else {
++		spin_lock_irqsave(&pgd_lock, flags);
++		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++				swapper_pg_dir + USER_PTRS_PER_PGD,
++				KERNEL_PGD_PTRS);
++		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
++		pgd_list_add(pgd);
++		spin_unlock_irqrestore(&pgd_lock, flags);
 +	}
-+
-+	return 0;
 +}
 +
-+
-+struct oprofile_operations xenoprof_ops = {
-+	.create_files 	= xenoprof_create_files,
-+	.set_active	= xenoprof_set_active,
-+	.set_passive    = xenoprof_set_passive,
-+	.setup 		= xenoprof_setup,
-+	.shutdown	= xenoprof_shutdown,
-+	.start		= xenoprof_start,
-+	.stop		= xenoprof_stop
-+};
-+
-+
-+/* in order to get driverfs right */
-+static int using_xenoprof;
-+
-+int __init oprofile_arch_init(struct oprofile_operations * ops)
++/* never called when PTRS_PER_PMD > 1 */
++void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 +{
-+	struct xenoprof_init init;
-+	int ret, i;
-+
-+	ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
++	unsigned long flags; /* can be called from interrupt context */
 +
-+	if (!ret) {
-+		num_events = init.num_events;
-+		is_primary = init.is_primary;
++	spin_lock_irqsave(&pgd_lock, flags);
++	pgd_list_del(pgd);
++	spin_unlock_irqrestore(&pgd_lock, flags);
 +
-+		/* just in case - make sure we do not overflow event list 
-+		   (i.e. counter_config list) */
-+		if (num_events > OP_MAX_COUNTER)
-+			num_events = OP_MAX_COUNTER;
++	pgd_test_and_unpin(pgd);
++}
 +
-+		/*  cpu_type is detected by Xen */
-+		cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
-+		strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
-+		xenoprof_ops.cpu_type = cpu_type;
++pgd_t *pgd_alloc(struct mm_struct *mm)
++{
++	int i;
++	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
++	pmd_t **pmd;
++	unsigned long flags;
 +
-+		init_driverfs();
-+		using_xenoprof = 1;
-+		*ops = xenoprof_ops;
++	pgd_test_and_unpin(pgd);
 +
-+		for (i=0; i<NR_CPUS; i++)
-+			ovf_irq[i] = -1;
++	if (PTRS_PER_PMD == 1 || !pgd)
++		return pgd;
 +
-+		active_defined = 0;
++	if (HAVE_SHARED_KERNEL_PMD) {
++		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++			if (!pmd)
++				goto out_oom;
++			set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
++		}
++		return pgd;
 +	}
-+	printk(KERN_INFO "oprofile_arch_init: ret %d, events %d, "
-+	       "is_primary %d\n", ret, num_events, is_primary);
-+	return ret;
-+}
-+
-+
-+void __exit oprofile_arch_exit(void)
-+{
-+	int i;
 +
-+	if (using_xenoprof)
-+		exit_driverfs();
++	/*
++	 * We can race save/restore (if we sleep during a GFP_KERNEL memory
++	 * allocation). We therefore store virtual addresses of pmds as they
++	 * do not change across save/restore, and poke the machine addresses
++	 * into the pgdir under the pgd_lock.
++	 */
++	pmd = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
++	if (!pmd) {
++		kmem_cache_free(pgd_cache, pgd);
++		return NULL;
++	}
 +
-+	if (shared_buffer) {
-+		vunmap(shared_buffer);
-+		shared_buffer = NULL;
++	/* Allocate pmds, remember virtual addresses. */
++	for (i = 0; i < PTRS_PER_PGD; ++i) {
++		pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++		if (!pmd[i])
++			goto out_oom;
 +	}
-+	if (is_primary) {
-+		for (i = 0; i < pdomains; i++)
-+			if (p_shared_buffer[i]) {
-+		                vunmap(p_shared_buffer[i]);
-+                		p_shared_buffer[i] = NULL;
-+			}
-+		HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);
-+        }
 +
-+}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/pci/irq-xen.c linux-2.6.18-xen/arch/i386/pci/irq-xen.c
---- linux-2.6.18.3/arch/i386/pci/irq-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/pci/irq-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,1206 @@
-+/*
-+ *	Low-Level PCI Support for PC -- Routing of Interrupts
-+ *
-+ *	(c) 1999--2000 Martin Mares <mj at ucw.cz>
-+ */
++	spin_lock_irqsave(&pgd_lock, flags);
 +
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include <linux/init.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/dmi.h>
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/io_apic.h>
-+#include <linux/irq.h>
-+#include <linux/acpi.h>
++	/* Protect against save/restore: move below 4GB under pgd_lock. */
++	if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
++		int rc = xen_create_contiguous_region(
++			(unsigned long)pgd, 0, 32);
++		if (rc) {
++			spin_unlock_irqrestore(&pgd_lock, flags);
++			goto out_oom;
++		}
++	}
 +
-+#include "pci.h"
++	/* Copy kernel pmd contents and write-protect the new pmds. */
++	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++		unsigned long v = (unsigned long)i << PGDIR_SHIFT;
++		pgd_t *kpgd = pgd_offset_k(v);
++		pud_t *kpud = pud_offset(kpgd, v);
++		pmd_t *kpmd = pmd_offset(kpud, v);
++		memcpy(pmd[i], kpmd, PAGE_SIZE);
++		make_lowmem_page_readonly(
++			pmd[i], XENFEAT_writable_page_tables);
++	}
 +
-+#define PIRQ_SIGNATURE	(('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
-+#define PIRQ_VERSION 0x0100
++	/* It is safe to poke machine addresses of pmds under the pmd_lock. */
++	for (i = 0; i < PTRS_PER_PGD; i++)
++		set_pgd(&pgd[i], __pgd(1 + __pa(pmd[i])));
 +
-+static int broken_hp_bios_irq9;
-+static int acer_tm360_irqrouting;
++	/* Ensure this pgd gets picked up and pinned on save/restore. */
++	pgd_list_add(pgd);
 +
-+static struct irq_routing_table *pirq_table;
++	spin_unlock_irqrestore(&pgd_lock, flags);
 +
-+static int pirq_enable_irq(struct pci_dev *dev);
++	kfree(pmd);
 +
-+/*
-+ * Never use: 0, 1, 2 (timer, keyboard, and cascade)
-+ * Avoid using: 13, 14 and 15 (FP error and IDE).
-+ * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
-+ */
-+unsigned int pcibios_irq_mask = 0xfff8;
++	return pgd;
 +
-+static int pirq_penalty[16] = {
-+	1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
-+	0, 0, 0, 0, 1000, 100000, 100000, 100000
-+};
++out_oom:
++	if (HAVE_SHARED_KERNEL_PMD) {
++		for (i--; i >= 0; i--)
++			kmem_cache_free(pmd_cache,
++					(void *)__va(pgd_val(pgd[i])-1));
++	} else {
++		for (i--; i >= 0; i--)
++			kmem_cache_free(pmd_cache, pmd[i]);
++		kfree(pmd);
++	}
++	kmem_cache_free(pgd_cache, pgd);
++	return NULL;
++}
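
The xen_create_contiguous_region() call above is there because, unless the
hypervisor advertises XENFEAT_pae_pgdir_above_4gb, a PAE pgd must be backed
by a machine frame reachable from the 32-bit CR3 register, i.e. below 4GiB.
A hedged sketch of the same primitive pair for any such restricted page
(error handling elided; semantics as used in this patch):

	/* Exchange the frame behind 'page' for one below 2^32 machine. */
	rc = xen_create_contiguous_region((unsigned long)page,
					  0,	/* order: a single page */
					  32);	/* required address bits */
	/* ... */
	/* Give the restricted frame back when the page is freed. */
	xen_destroy_contiguous_region((unsigned long)page, 0);
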
 +
-+struct irq_router {
-+	char *name;
-+	u16 vendor, device;
-+	int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
-+	int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
-+};
++void pgd_free(pgd_t *pgd)
++{
++	int i;
 +
-+struct irq_router_handler {
-+	u16 vendor;
-+	int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
-+};
++	/*
++	 * After this the pgd should not be pinned for the duration of this
++	 * function's execution. We should never sleep and thus never race:
++	 *  1. User pmds will not become write-protected under our feet due
++	 *     to a concurrent mm_pin_all().
++	 *  2. The machine addresses in PGD entries will not become invalid
++	 *     due to a concurrent save/restore.
++	 */
++	pgd_test_and_unpin(pgd);
 +
-+int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
-+void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
++	/* in the PAE case user pgd entries are overwritten before usage */
++	if (PTRS_PER_PMD > 1) {
++		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++			kmem_cache_free(pmd_cache, pmd);
++		}
 +
-+/*
-+ *  Check passed address for the PCI IRQ Routing Table signature
-+ *  and perform checksum verification.
-+ */
++		if (!HAVE_SHARED_KERNEL_PMD) {
++			unsigned long flags;
++			spin_lock_irqsave(&pgd_lock, flags);
++			pgd_list_del(pgd);
++			spin_unlock_irqrestore(&pgd_lock, flags);
 +
-+static inline struct irq_routing_table * pirq_check_routing_table(u8 *addr)
-+{
-+	struct irq_routing_table *rt;
-+	int i;
-+	u8 sum;
++			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++				make_lowmem_page_writable(
++					pmd, XENFEAT_writable_page_tables);
++				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++				kmem_cache_free(pmd_cache, pmd);
++			}
 +
-+	rt = (struct irq_routing_table *) addr;
-+	if (rt->signature != PIRQ_SIGNATURE ||
-+	    rt->version != PIRQ_VERSION ||
-+	    rt->size % 16 ||
-+	    rt->size < sizeof(struct irq_routing_table))
-+		return NULL;
-+	sum = 0;
-+	for (i=0; i < rt->size; i++)
-+		sum += addr[i];
-+	if (!sum) {
-+		DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt);
-+		return rt;
++			if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
++				xen_destroy_contiguous_region(
++					(unsigned long)pgd, 0);
++		}
 +	}
-+	return NULL;
++
++	/* in the non-PAE case, free_pgtables() clears user pgd entries */
++	kmem_cache_free(pgd_cache, pgd);
 +}
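
For readers following the pointer arithmetic in pgd_alloc()/pgd_free(): each
PAE pgd entry is written as __pgd(1 + __pa(pmd[i])), i.e. the pmd's physical
address with the low present bit set (any machine-address translation is
hidden inside set_pgd()/pgd_val()), so the pmd's virtual address is later
recovered with __va(pgd_val(pgd[i]) - 1). The round trip, isolated:

	pgd_t entry = __pgd(1 + __pa(pmd));	/* encode: 1 == _PAGE_PRESENT */
	pmd_t *back = __va(pgd_val(entry) - 1);	/* decode: strip the bit */
	BUG_ON(back != pmd);
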
 +
++void make_lowmem_page_readonly(void *va, unsigned int feature)
++{
++	pte_t *pte;
++	int rc;
 +
++	if (xen_feature(feature))
++		return;
 +
-+/*
-+ *  Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
-+ */
++	pte = virt_to_ptep(va);
++	rc = HYPERVISOR_update_va_mapping(
++		(unsigned long)va, pte_wrprotect(*pte), 0);
++	BUG_ON(rc);
++}
 +
-+static struct irq_routing_table * __init pirq_find_routing_table(void)
++void make_lowmem_page_writable(void *va, unsigned int feature)
 +{
-+	u8 *addr;
-+	struct irq_routing_table *rt;
++	pte_t *pte;
++	int rc;
 +
-+#ifdef CONFIG_XEN
-+	if (!is_initial_xendomain())
-+		return NULL;
-+#endif
-+	if (pirq_table_addr) {
-+		rt = pirq_check_routing_table((u8 *) isa_bus_to_virt(pirq_table_addr));
-+		if (rt)
-+			return rt;
-+		printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n");
-+	}
-+	for(addr = (u8 *) isa_bus_to_virt(0xf0000); addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
-+		rt = pirq_check_routing_table(addr);
-+		if (rt)
-+			return rt;
-+	}
-+	
-+	return NULL;
++	if (xen_feature(feature))
++		return;
++
++	pte = virt_to_ptep(va);
++	rc = HYPERVISOR_update_va_mapping(
++		(unsigned long)va, pte_mkwrite(*pte), 0);
++	BUG_ON(rc);
 +}
 +
-+/*
-+ *  If we have a IRQ routing table, use it to search for peer host
-+ *  bridges.  It's a gross hack, but since there are no other known
-+ *  ways how to get a list of buses, we have to go this way.
-+ */
-+
-+static void __init pirq_peer_trick(void)
++void make_page_readonly(void *va, unsigned int feature)
 +{
-+	struct irq_routing_table *rt = pirq_table;
-+	u8 busmap[256];
-+	int i;
-+	struct irq_info *e;
++	pte_t *pte;
++	int rc;
 +
-+	memset(busmap, 0, sizeof(busmap));
-+	for(i=0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
-+		e = &rt->slots[i];
-+#ifdef DEBUG
-+		{
-+			int j;
-+			DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
-+			for(j=0; j<4; j++)
-+				DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
-+			DBG("\n");
-+		}
++	if (xen_feature(feature))
++		return;
++
++	pte = virt_to_ptep(va);
++	rc = HYPERVISOR_update_va_mapping(
++		(unsigned long)va, pte_wrprotect(*pte), 0);
++	if (rc) /* fall back to a direct L1 entry update */
++		xen_l1_entry_update(pte, pte_wrprotect(*pte));
++	if ((unsigned long)va >= (unsigned long)high_memory) {
++		unsigned long pfn = pte_pfn(*pte);
++#ifdef CONFIG_HIGHMEM
++		if (pfn >= highstart_pfn)
++			kmap_flush_unused(); /* flush stale writable kmaps */
++		else
 +#endif
-+		busmap[e->bus] = 1;
-+	}
-+	for(i = 1; i < 256; i++) {
-+		if (!busmap[i] || pci_find_bus(0, i))
-+			continue;
-+		if (pci_scan_bus(i, &pci_root_ops, NULL))
-+			printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
++			make_lowmem_page_readonly(
++				phys_to_virt(pfn << PAGE_SHIFT), feature); 
 +	}
-+	pcibios_last_bus = -1;
 +}
 +
-+/*
-+ *  Code for querying and setting of IRQ routes on various interrupt routers.
-+ */
-+
-+void eisa_set_level_irq(unsigned int irq)
++void make_page_writable(void *va, unsigned int feature)
 +{
-+	unsigned char mask = 1 << (irq & 7);
-+	unsigned int port = 0x4d0 + (irq >> 3);
-+	unsigned char val;
-+	static u16 eisa_irq_mask;
++	pte_t *pte;
++	int rc;
 +
-+	if (irq >= 16 || (1 << irq) & eisa_irq_mask)
++	if (xen_feature(feature))
 +		return;
 +
-+	eisa_irq_mask |= (1 << irq);
-+	printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq);
-+	val = inb(port);
-+	if (!(val & mask)) {
-+		DBG(KERN_DEBUG " -> edge");
-+		outb(val | mask, port);
++	pte = virt_to_ptep(va);
++	rc = HYPERVISOR_update_va_mapping(
++		(unsigned long)va, pte_mkwrite(*pte), 0);
++	if (rc) /* fall back to a direct L1 entry update */
++		xen_l1_entry_update(pte, pte_mkwrite(*pte));
++	if ((unsigned long)va >= (unsigned long)high_memory) {
++		unsigned long pfn = pte_pfn(*pte); 
++#ifdef CONFIG_HIGHMEM
++		if (pfn < highstart_pfn)
++#endif
++			make_lowmem_page_writable(
++				phys_to_virt(pfn << PAGE_SHIFT), feature);
 +	}
 +}
 +
-+/*
-+ * Common IRQ routing practice: nybbles in config space,
-+ * offset by some magic constant.
-+ */
-+static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
 +{
-+	u8 x;
-+	unsigned reg = offset + (nr >> 1);
++	if (xen_feature(feature))
++		return;
 +
-+	pci_read_config_byte(router, reg, &x);
-+	return (nr & 1) ? (x >> 4) : (x & 0xf);
++	while (nr-- != 0) {
++		make_page_readonly(va, feature);
++		va = (void *)((unsigned long)va + PAGE_SIZE);
++	}
 +}
 +
-+static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val)
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
 +{
-+	u8 x;
-+	unsigned reg = offset + (nr >> 1);
++	if (xen_feature(feature))
++		return;
 +
-+	pci_read_config_byte(router, reg, &x);
-+	x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
-+	pci_write_config_byte(router, reg, x);
++	while (nr-- != 0) {
++		make_page_writable(va, feature);
++		va = (void *)((unsigned long)va + PAGE_SIZE);
++	}
 +}
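
These six helpers are the building blocks the rest of the patch uses whenever
a page is handed to Xen as a page table or reclaimed from that role. A usage
sketch, assuming a page-aligned buffer pt that is about to be installed in a
pinned tree (pt itself is illustrative, not from the patch):

	make_page_readonly(pt, XENFEAT_writable_page_tables);
	/* ... install pt via hypercalls while it is read-only ... */
	make_page_writable(pt, XENFEAT_writable_page_tables);

Threading the feature bit through lets every call collapse to a no-op when
the hypervisor emulates writable page tables.
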
 +
-+/*
-+ * ALI pirq entries are damn ugly, and completely undocumented.
-+ * This has been figured out from pirq tables, and it's not a pretty
-+ * picture.
-+ */
-+static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++static inline void pgd_walk_set_prot(void *pt, pgprot_t flags)
 +{
-+	static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
++	struct page *page = virt_to_page(pt);
++	unsigned long pfn = page_to_pfn(page);
 +
-+	return irqmap[read_config_nybble(router, 0x48, pirq-1)];
++	if (PageHighMem(page))
++		return;
++	BUG_ON(HYPERVISOR_update_va_mapping(
++		(unsigned long)__va(pfn << PAGE_SHIFT),
++		pfn_pte(pfn, flags), 0));
 +}
 +
-+static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
 +{
-+	static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
-+	unsigned int val = irqmap[irq];
-+		
-+	if (val) {
-+		write_config_nybble(router, 0x48, pirq-1, val);
-+		return 1;
-+	}
-+	return 0;
-+}
++	pgd_t *pgd = pgd_base;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++	int    g, u, m;
 +
-+/*
-+ * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
-+ * just a pointer to the config space.
-+ */
-+static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	u8 x;
++	if (xen_feature(XENFEAT_auto_translated_physmap))
++		return;
 +
-+	pci_read_config_byte(router, pirq, &x);
-+	return (x < 16) ? x : 0;
-+}
++	for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
++		if (pgd_none(*pgd))
++			continue;
++		pud = pud_offset(pgd, 0);
++		if (PTRS_PER_PUD > 1) /* not folded */
++			pgd_walk_set_prot(pud, flags);
++		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++			if (pud_none(*pud))
++				continue;
++			pmd = pmd_offset(pud, 0);
++			if (PTRS_PER_PMD > 1) /* not folded */
++				pgd_walk_set_prot(pmd, flags);
++			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++				if (pmd_none(*pmd))
++					continue;
++				pte = pte_offset_kernel(pmd, 0);
++				pgd_walk_set_prot(pte, flags);
++			}
++		}
++	}
 +
-+static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	pci_write_config_byte(router, pirq, irq);
-+	return 1;
++	BUG_ON(HYPERVISOR_update_va_mapping(
++		(unsigned long)pgd_base,
++		pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
++		UVMF_TLB_FLUSH));
 +}
 +
-+/*
-+ * The VIA pirq rules are nibble-based, like ALI,
-+ * but without the ugly irq number munging.
-+ * However, PIRQD is in the upper instead of lower 4 bits.
-+ */
-+static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++static void __pgd_pin(pgd_t *pgd)
 +{
-+	return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);
++	pgd_walk(pgd, PAGE_KERNEL_RO);
++	xen_pgd_pin(__pa(pgd));
++	set_bit(PG_pinned, &virt_to_page(pgd)->flags);
 +}
 +
-+static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++static void __pgd_unpin(pgd_t *pgd)
 +{
-+	write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq);
-+	return 1;
++	xen_pgd_unpin(__pa(pgd));
++	pgd_walk(pgd, PAGE_KERNEL);
++	clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
 +}
 +
-+/*
-+ * The VIA pirq rules are nibble-based, like ALI,
-+ * but without the ugly irq number munging.
-+ * However, for 82C586, nibble map is different .
-+ */
-+static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++static void pgd_test_and_unpin(pgd_t *pgd)
 +{
-+	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
-+	return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
++	if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
++		__pgd_unpin(pgd);
 +}
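
The ordering inside the two helpers above is deliberate: Xen validates the
whole tree at pin time, so __pgd_pin() must write-protect it first, and the
pin must be dropped before __pgd_unpin() makes any of it writable again:

	__pgd_pin(pgd);		/* pgd_walk(RO) first, then xen_pgd_pin() */
	__pgd_unpin(pgd);	/* xen_pgd_unpin() first, then pgd_walk(RW) */
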
 +
-+static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++void mm_pin(struct mm_struct *mm)
 +{
-+	static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
-+	write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
-+	return 1;
++	if (xen_feature(XENFEAT_writable_page_tables))
++		return;
++	spin_lock(&mm->page_table_lock);
++	__pgd_pin(mm->pgd);
++	spin_unlock(&mm->page_table_lock);
 +}
 +
-+/*
-+ * ITE 8330G pirq rules are nibble-based
-+ * FIXME: pirqmap may be { 1, 0, 3, 2 },
-+ * 	  2+3 are both mapped to irq 9 on my system
-+ */
-+static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++void mm_unpin(struct mm_struct *mm)
 +{
-+	static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
-+	return read_config_nybble(router,0x43, pirqmap[pirq-1]);
++	if (xen_feature(XENFEAT_writable_page_tables))
++		return;
++	spin_lock(&mm->page_table_lock);
++	__pgd_unpin(mm->pgd);
++	spin_unlock(&mm->page_table_lock);
 +}
 +
-+static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++void mm_pin_all(void)
 +{
-+	static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
-+	write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
-+	return 1;
-+}
++	struct page *page;
 +
-+/*
-+ * OPTI: high four bits are nibble pointer..
-+ * I wonder what the low bits do?
-+ */
-+static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	return read_config_nybble(router, 0xb8, pirq >> 4);
-+}
++	/* Only pgds on the pgd_list please: none hidden in the slab cache. */
++	kmem_cache_shrink(pgd_cache);
 +
-+static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	write_config_nybble(router, 0xb8, pirq >> 4, irq);
-+	return 1;
++	if (xen_feature(XENFEAT_writable_page_tables))
++		return;
++
++	for (page = pgd_list; page; page = (struct page *)page->index) {
++		if (!test_bit(PG_pinned, &page->flags))
++			__pgd_pin((pgd_t *)page_address(page));
++	}
 +}
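
mm_pin_all() relies on pgd_list chaining the struct page of every live pgd
through its otherwise-unused index field; the preceding kmem_cache_shrink()
returns cached-but-free pgds to the page allocator so none can hide in the
slab. The traversal idiom, isolated (visit() is illustrative):

	struct page *page;

	for (page = pgd_list; page; page = (struct page *)page->index)
		visit((pgd_t *)page_address(page));
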
 +
-+/*
-+ * Cyrix: nibble offset 0x5C
-+ * 0x5C bits 7:4 is INTB bits 3:0 is INTA 
-+ * 0x5D bits 7:4 is INTD bits 3:0 is INTC
-+ */
-+static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++void _arch_dup_mmap(struct mm_struct *mm)
 +{
-+	return read_config_nybble(router, 0x5C, (pirq-1)^1);
++	if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
++		mm_pin(mm);
 +}
 +
-+static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++void _arch_exit_mmap(struct mm_struct *mm)
 +{
-+	write_config_nybble(router, 0x5C, (pirq-1)^1, irq);
-+	return 1;
-+}
++	struct task_struct *tsk = current;
 +
-+/*
-+ *	PIRQ routing for SiS 85C503 router used in several SiS chipsets.
-+ *	We have to deal with the following issues here:
-+ *	- vendors have different ideas about the meaning of link values
-+ *	- some onboard devices (integrated in the chipset) have special
-+ *	  links and are thus routed differently (i.e. not via PCI INTA-INTD)
-+ *	- different revision of the router have a different layout for
-+ *	  the routing registers, particularly for the onchip devices
-+ *
-+ *	For all routing registers the common thing is we have one byte
-+ *	per routeable link which is defined as:
-+ *		 bit 7      IRQ mapping enabled (0) or disabled (1)
-+ *		 bits [6:4] reserved (sometimes used for onchip devices)
-+ *		 bits [3:0] IRQ to map to
-+ *		     allowed: 3-7, 9-12, 14-15
-+ *		     reserved: 0, 1, 2, 8, 13
-+ *
-+ *	The config-space registers located at 0x41/0x42/0x43/0x44 are
-+ *	always used to route the normal PCI INT A/B/C/D respectively.
-+ *	Apparently there are systems implementing PCI routing table using
-+ *	link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D.
-+ *	We try our best to handle both link mappings.
-+ *	
-+ *	Currently (2003-05-21) it appears most SiS chipsets follow the
-+ *	definition of routing registers from the SiS-5595 southbridge.
-+ *	According to the SiS 5595 datasheets the revision id's of the
-+ *	router (ISA-bridge) should be 0x01 or 0xb0.
-+ *
-+ *	Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1.
-+ *	Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets.
-+ *	They seem to work with the current routing code. However there is
-+ *	some concern because of the two USB-OHCI HCs (original SiS 5595
-+ *	had only one). YMMV.
-+ *
-+ *	Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1:
-+ *
-+ *	0x61:	IDEIRQ:
-+ *		bits [6:5] must be written 01
-+ *		bit 4 channel-select primary (0), secondary (1)
-+ *
-+ *	0x62:	USBIRQ:
-+ *		bit 6 OHCI function disabled (0), enabled (1)
-+ *	
-+ *	0x6a:	ACPI/SCI IRQ: bits 4-6 reserved
-+ *
-+ *	0x7e:	Data Acq. Module IRQ - bits 4-6 reserved
-+ *
-+ *	We support USBIRQ (in addition to INTA-INTD) and keep the
-+ *	IDE, ACPI and DAQ routing untouched as set by the BIOS.
++	task_lock(tsk);
++
++	/*
++	 * We aggressively remove the defunct pgd from cr3: unmap_vmas() then
++	 * executes *much* faster, as skipping TLB flushes allows bigger
++	 * write-protect batches.
++	 */
++	if (tsk->active_mm == mm) {
++		tsk->active_mm = &init_mm;
++		atomic_inc(&init_mm.mm_count);
++
++		switch_mm(mm, &init_mm, tsk);
++
++		atomic_dec(&mm->mm_count);
++		BUG_ON(atomic_read(&mm->mm_count) == 0);
++	}
++
++	task_unlock(tsk);
++
++	if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
++	    (atomic_read(&mm->mm_count) == 1) &&
++	    !mm->context.has_foreign_mappings)
++		mm_unpin(mm);
++}
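
Taken together, the two hooks above give a pgd a lazy pin lifecycle, which
can be summarized as follows (a sketch; the state lives only in PG_pinned):

	/*
	 * fork:  _arch_dup_mmap() -> mm_pin(): tree goes read-only and Xen
	 *        validates it; PTE updates now go through hypercalls.
	 * exit:  _arch_exit_mmap() -> mm_unpin() as early as possible, so
	 *        unmap_vmas() can tear the tree down with plain memory
	 *        writes instead of per-batch write-protect hypercalls.
	 */
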
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/oprofile/Makefile b/arch/i386/oprofile/Makefile
+--- a/arch/i386/oprofile/Makefile	2007-03-12 21:58:05.000000000 +0100
++++ b/arch/i386/oprofile/Makefile	2007-03-14 10:55:14.000000000 +0100
+@@ -6,7 +6,11 @@
+ 		oprofilefs.o oprofile_stats.o  \
+ 		timer_int.o )
+ 
++ifdef CONFIG_XEN
++oprofile-y				:= $(DRIVER_OBJS) xenoprof.o
++else 
+ oprofile-y				:= $(DRIVER_OBJS) init.o backtrace.o
+ oprofile-$(CONFIG_X86_LOCAL_APIC) 	+= nmi_int.o op_model_athlon.o \
+ 					   op_model_ppro.o op_model_p4.o
+ oprofile-$(CONFIG_X86_IO_APIC)		+= nmi_timer_int.o
++endif
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/oprofile/xenoprof.c b/arch/i386/oprofile/xenoprof.c
+--- a/arch/i386/oprofile/xenoprof.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/oprofile/xenoprof.c	2007-03-14 10:55:14.000000000 +0100
+@@ -0,0 +1,584 @@
++/**
++ * @file xenoprof.c
 + *
-+ *	Currently the only reported exception is the new SiS 65x chipset
-+ *	which includes the SiS 69x southbridge. Here we have the 85C503
-+ *	router revision 0x04 and there are changes in the register layout
-+ *	mostly related to the different USB HCs with USB 2.0 support.
++ * @remark Copyright 2002 OProfile authors
++ * @remark Read the file COPYING
 + *
-+ *	Onchip routing for router rev-id 0x04 (try-and-error observation)
++ * @author John Levon <levon at movementarian.org>
 + *
-+ *	0x60/0x61/0x62/0x63:	1xEHCI and 3xOHCI (companion) USB-HCs
-+ *				bit 6-4 are probably unused, not like 5595
++ * Modified by Aravind Menon and Jose Renato Santos for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
 + */
 +
-+#define PIRQ_SIS_IRQ_MASK	0x0f
-+#define PIRQ_SIS_IRQ_DISABLE	0x80
-+#define PIRQ_SIS_USB_ENABLE	0x40
++#include <linux/init.h>
++#include <linux/notifier.h>
++#include <linux/smp.h>
++#include <linux/oprofile.h>
++#include <linux/sysdev.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/vmalloc.h>
++#include <asm/nmi.h>
++#include <asm/msr.h>
++#include <asm/apic.h>
++#include <asm/pgtable.h>
++#include <xen/evtchn.h>
++#include "op_counter.h"
++
++#include <xen/driver_util.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/xenoprof.h>
++#include <../../../drivers/oprofile/cpu_buffer.h>
++#include <../../../drivers/oprofile/event_buffer.h>
 +
-+static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	u8 x;
-+	int reg;
++#define MAX_XENOPROF_SAMPLES 16
 +
-+	reg = pirq;
-+	if (reg >= 0x01 && reg <= 0x04)
-+		reg += 0x40;
-+	pci_read_config_byte(router, reg, &x);
-+	return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK);
-+}
++static int xenoprof_start(void);
++static void xenoprof_stop(void);
++
++static int xenoprof_enabled = 0;
++static unsigned int num_events = 0;
++static int is_primary = 0;
++static int active_defined;
++
++/* sample buffers shared with Xen */
++xenoprof_buf_t *xenoprof_buf[MAX_VIRT_CPUS];
++/* Shared buffer area */
++char *shared_buffer = NULL;
++/* Number of buffers in shared area (one per VCPU) */
++int nbuf;
++/* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
++int ovf_irq[NR_CPUS];
++/* cpu model type string - copied from Xen memory space on XENOPROF_init command */
++char cpu_type[XENOPROF_CPU_TYPE_SIZE];
++
++/* Passive sample buffers shared with Xen */
++xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
++/* Passive shared buffer area */
++char *p_shared_buffer[MAX_OPROF_DOMAINS];
++
++#ifdef CONFIG_PM
 +
-+static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++static int xenoprof_suspend(struct sys_device *dev, pm_message_t state)
 +{
-+	u8 x;
-+	int reg;
++	if (xenoprof_enabled == 1)
++		xenoprof_stop();
++	return 0;
++}
 +
-+	reg = pirq;
-+	if (reg >= 0x01 && reg <= 0x04)
-+		reg += 0x40;
-+	pci_read_config_byte(router, reg, &x);
-+	x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE);
-+	x |= irq ? irq: PIRQ_SIS_IRQ_DISABLE;
-+	pci_write_config_byte(router, reg, x);
-+	return 1;
++
++static int xenoprof_resume(struct sys_device *dev)
++{
++	if (xenoprof_enabled == 1)
++		xenoprof_start();
++	return 0;
 +}
 +
 +
-+/*
-+ * VLSI: nibble offset 0x74 - educated guess due to routing table and
-+ *       config space of VLSI 82C534 PCI-bridge/router (1004:0102)
-+ *       Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard
-+ *       devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6
-+ *       for the busbridge to the docking station.
-+ */
++static struct sysdev_class oprofile_sysclass = {
++	set_kset_name("oprofile"),
++	.resume		= xenoprof_resume,
++	.suspend	= xenoprof_suspend
++};
++
++
++static struct sys_device device_oprofile = {
++	.id	= 0,
++	.cls	= &oprofile_sysclass,
++};
 +
-+static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++
++static int __init init_driverfs(void)
 +{
-+	if (pirq > 8) {
-+		printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
-+		return 0;
-+	}
-+	return read_config_nybble(router, 0x74, pirq-1);
++	int error;
++	if (!(error = sysdev_class_register(&oprofile_sysclass)))
++		error = sysdev_register(&device_oprofile);
++	return error;
 +}
 +
-+static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++
++static void __exit exit_driverfs(void)
 +{
-+	if (pirq > 8) {
-+		printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
-+		return 0;
-+	}
-+	write_config_nybble(router, 0x74, pirq-1, irq);
-+	return 1;
++	sysdev_unregister(&device_oprofile);
++	sysdev_class_unregister(&oprofile_sysclass);
 +}
 +
-+/*
-+ * ServerWorks: PCI interrupts mapped to system IRQ lines through Index
-+ * and Redirect I/O registers (0x0c00 and 0x0c01).  The Index register
-+ * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a.  The Redirect
-+ * register is a straight binary coding of desired PIC IRQ (low nibble).
-+ *
-+ * The 'link' value in the PIRQ table is already in the correct format
-+ * for the Index register.  There are some special index values:
-+ * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
-+ * and 0x03 for SMBus.
-+ */
-+static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++#else
++#define init_driverfs() do { } while (0)
++#define exit_driverfs() do { } while (0)
++#endif /* CONFIG_PM */
++
++unsigned long long oprofile_samples = 0;
++unsigned long long p_oprofile_samples = 0;
++
++unsigned int pdomains;
++struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
++
++static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
 +{
-+	outb_p(pirq, 0xc00);
-+	return inb(0xc01) & 0xf;
++	int head, tail, size;
++
++	head = buf->event_head;
++	tail = buf->event_tail;
++	size = buf->event_size;
++
++	if (tail > head) {
++		while (tail < size) {
++			oprofile_add_pc(buf->event_log[tail].eip,
++					buf->event_log[tail].mode,
++					buf->event_log[tail].event);
++			if (!is_passive)
++				oprofile_samples++;
++			else
++				p_oprofile_samples++;
++			tail++;
++		}
++		tail = 0;
++	}
++	while (tail < head) {
++		oprofile_add_pc(buf->event_log[tail].eip,
++				buf->event_log[tail].mode,
++				buf->event_log[tail].event);
++		if (!is_passive)
++			oprofile_samples++;
++		else
++			p_oprofile_samples++;
++		tail++;
++	}
++
++	buf->event_tail = tail;
 +}
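
xenoprof_add_pc() drains a ring that Xen fills from the hypervisor side:
when event_tail is ahead of event_head the live samples wrap around the end
of the buffer, so [tail, size) is consumed before [0, head). The same idiom
reduced to a self-contained skeleton (types and emit() are illustrative):

	struct sample { unsigned long eip; int mode, event; };
	struct ring   { int head, tail, size; struct sample *log; };

	static void drain(struct ring *r, void (*emit)(struct sample *))
	{
		int head = r->head, tail = r->tail;

		if (tail > head) {		/* wrapped: [tail, size) first */
			for (; tail < r->size; tail++)
				emit(&r->log[tail]);
			tail = 0;
		}
		for (; tail < head; tail++)	/* then [0, head) */
			emit(&r->log[tail]);
		r->tail = tail;			/* publish consumption */
	}
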
 +
-+static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++static void xenoprof_handle_passive(void)
 +{
-+	outb_p(pirq, 0xc00);
-+	outb_p(irq, 0xc01);
-+	return 1;
++	int i, j;
++	int flag_domain, flag_switch = 0;
++	
++	for (i = 0; i < pdomains; i++) {
++		flag_domain = 0;
++		for (j = 0; j < passive_domains[i].nbuf; j++) {
++			xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
++			if (buf->event_head == buf->event_tail)
++				continue;
++			if (!flag_domain) {
++				if (!oprofile_add_domain_switch(
++						passive_domains[i].domain_id))
++					goto done;
++				flag_domain = 1;
++			}
++			xenoprof_add_pc(buf, 1);
++			flag_switch = 1;
++		}
++	}
++done:
++	if (flag_switch)
++		oprofile_add_domain_switch(COORDINATOR_DOMAIN);
 +}
 +
-+/* Support for AMD756 PCI IRQ Routing
-+ * Jhon H. Caicedo <jhcaiced at osso.org.co>
-+ * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
-+ * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced)
-+ * The AMD756 pirq rules are nibble-based
-+ * offset 0x56 0-3 PIRQA  4-7  PIRQB
-+ * offset 0x57 0-3 PIRQC  4-7  PIRQD
-+ */
-+static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++static irqreturn_t
++xenoprof_ovf_interrupt(int irq, void *dev_id)
 +{
-+	u8 irq;
-+	irq = 0;
-+	if (pirq <= 4)
-+	{
-+		irq = read_config_nybble(router, 0x56, pirq - 1);
++	struct xenoprof_buf * buf;
++	int cpu;
++	static unsigned long flag;
++
++	cpu = smp_processor_id();
++	buf = xenoprof_buf[cpu];
++
++	xenoprof_add_pc(buf, 0);
++
++	if (is_primary && !test_and_set_bit(0, &flag)) {
++		xenoprof_handle_passive();
++		smp_mb__before_clear_bit();
++		clear_bit(0, &flag);
 +	}
-+	printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
-+		dev->vendor, dev->device, pirq, irq);
-+	return irq;
++
++	return IRQ_HANDLED;
 +}
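
Note how the handler serializes the passive scan: the static flag acts as a
try-lock, so whichever VCPU's interrupt wins test_and_set_bit() drains the
passive buffers while the losers simply skip that work rather than spin in
interrupt context:

	if (is_primary && !test_and_set_bit(0, &flag)) {
		xenoprof_handle_passive();
		smp_mb__before_clear_bit();	/* writes visible before release */
		clear_bit(0, &flag);
	}
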
 +
-+static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++
++static void unbind_virq(void)
 +{
-+	printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n", 
-+		dev->vendor, dev->device, pirq, irq);
-+	if (pirq <= 4)
-+	{
-+		write_config_nybble(router, 0x56, pirq - 1, irq);
++	int i;
++
++	for_each_possible_cpu(i) {
++		if (ovf_irq[i] >= 0) {
++			unbind_from_irqhandler(ovf_irq[i], NULL);
++			ovf_irq[i] = -1;
++		}
 +	}
-+	return 1;
 +}
 +
-+#ifdef CONFIG_PCI_BIOS
 +
-+static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++static int bind_virq(void)
 +{
-+	struct pci_dev *bridge;
-+	int pin = pci_get_interrupt_pin(dev, &bridge);
-+	return pcibios_set_irq_routing(bridge, pin, irq);
++	int i, result;
++
++	for_each_possible_cpu(i) {
++		result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
++						 i,
++						 xenoprof_ovf_interrupt,
++						 SA_INTERRUPT,
++						 "xenoprof",
++						 NULL);
++
++		if (result < 0) {
++			unbind_virq();
++			return result;
++		}
++
++		ovf_irq[i] = result;
++	}
++		
++	return 0;
 +}
 +
-+#endif
 +
-+static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++static int map_xenoprof_buffer(int max_samples)
 +{
-+	static struct pci_device_id __initdata pirq_440gx[] = {
-+		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
-+		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
-+		{ },
-+	};
++	struct xenoprof_get_buffer get_buffer;
++	struct xenoprof_buf *buf;
++	int npages, ret, i;
++	struct vm_struct *area;
 +
-+	/* 440GX has a proprietary PIRQ router -- don't use it */
-+	if (pci_dev_present(pirq_440gx))
++	if ( shared_buffer )
 +		return 0;
 +
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_INTEL_82371FB_0:
-+		case PCI_DEVICE_ID_INTEL_82371SB_0:
-+		case PCI_DEVICE_ID_INTEL_82371AB_0:
-+		case PCI_DEVICE_ID_INTEL_82371MX:
-+		case PCI_DEVICE_ID_INTEL_82443MX_0:
-+		case PCI_DEVICE_ID_INTEL_82801AA_0:
-+		case PCI_DEVICE_ID_INTEL_82801AB_0:
-+		case PCI_DEVICE_ID_INTEL_82801BA_0:
-+		case PCI_DEVICE_ID_INTEL_82801BA_10:
-+		case PCI_DEVICE_ID_INTEL_82801CA_0:
-+		case PCI_DEVICE_ID_INTEL_82801CA_12:
-+		case PCI_DEVICE_ID_INTEL_82801DB_0:
-+		case PCI_DEVICE_ID_INTEL_82801E_0:
-+		case PCI_DEVICE_ID_INTEL_82801EB_0:
-+		case PCI_DEVICE_ID_INTEL_ESB_1:
-+		case PCI_DEVICE_ID_INTEL_ICH6_0:
-+		case PCI_DEVICE_ID_INTEL_ICH6_1:
-+		case PCI_DEVICE_ID_INTEL_ICH7_0:
-+		case PCI_DEVICE_ID_INTEL_ICH7_1:
-+		case PCI_DEVICE_ID_INTEL_ICH7_30:
-+		case PCI_DEVICE_ID_INTEL_ICH7_31:
-+		case PCI_DEVICE_ID_INTEL_ESB2_0:
-+		case PCI_DEVICE_ID_INTEL_ICH8_0:
-+		case PCI_DEVICE_ID_INTEL_ICH8_1:
-+		case PCI_DEVICE_ID_INTEL_ICH8_2:
-+		case PCI_DEVICE_ID_INTEL_ICH8_3:
-+		case PCI_DEVICE_ID_INTEL_ICH8_4:
-+			r->name = "PIIX/ICH";
-+			r->get = pirq_piix_get;
-+			r->set = pirq_piix_set;
-+			return 1;
++	get_buffer.max_samples = max_samples;
++
++	if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, &get_buffer)) )
++		return ret;
++
++	nbuf = get_buffer.nbuf;
++	npages = (get_buffer.bufsize * nbuf - 1) / PAGE_SIZE + 1;
++
++	area = alloc_vm_area(npages * PAGE_SIZE);
++	if (area == NULL)
++		return -ENOMEM;
++
++	if ( (ret = direct_kernel_remap_pfn_range(
++		      (unsigned long)area->addr,
++		      get_buffer.buf_maddr >> PAGE_SHIFT,
++		      npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE), DOMID_SELF)) ) {
++		vunmap(area->addr);
++		return ret;
 +	}
++
++	shared_buffer = area->addr;
++	for (i = 0; i < nbuf; i++) {
++		buf = (struct xenoprof_buf *)
++			&shared_buffer[i * get_buffer.bufsize];
++		BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
++		xenoprof_buf[buf->vcpu_id] = buf;
++	}
++
 +	return 0;
 +}
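
The mapping dance above (alloc_vm_area() to reserve kernel virtual address
space, then direct_kernel_remap_pfn_range() to point it at the frames Xen
named in buf_maddr) is how both the active and, below, the passive sample
buffers are mapped. Condensed, error paths elided; note the code relies on
vunmap() alone to release the reserved range on failure:

	area = alloc_vm_area(npages * PAGE_SIZE);
	ret = direct_kernel_remap_pfn_range((unsigned long)area->addr,
					    get_buffer.buf_maddr >> PAGE_SHIFT,
					    npages * PAGE_SIZE,
					    __pgprot(_KERNPG_TABLE), DOMID_SELF);
	if (ret)
		vunmap(area->addr);
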
 +
-+static __init int via_router_probe(struct irq_router *r,
-+				struct pci_dev *router, u16 device)
++
++static int xenoprof_setup(void)
 +{
-+	/* FIXME: We should move some of the quirk fixup stuff here */
++	int ret;
++	int i;
 +
-+	/*
-+	 * work arounds for some buggy BIOSes
-+	 */
-+	if (device == PCI_DEVICE_ID_VIA_82C586_0) {
-+		switch(router->device) {
-+		case PCI_DEVICE_ID_VIA_82C686:
-+			/*
-+			 * Asus k7m bios wrongly reports 82C686A
-+			 * as 586-compatible
-+			 */
-+			device = PCI_DEVICE_ID_VIA_82C686;
-+			break;
-+		case PCI_DEVICE_ID_VIA_8235:
-+			/**
-+			 * Asus a7v-x bios wrongly reports 8235
-+			 * as 586-compatible
-+			 */
-+			device = PCI_DEVICE_ID_VIA_8235;
-+			break;
++	if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
++		return ret;
++
++	if ( (ret = bind_virq()) )
++		return ret;
++
++	if (is_primary) {
++		struct xenoprof_counter counter;
++
++		/* Define dom0 as an active domain if not done yet */
++		if (!active_defined) {
++			domid_t domid;
++			ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
++			if (ret)
++				goto err;
++			domid = 0;
++			ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
++			if (ret)
++				goto err;
++			active_defined = 1;
++		}
++
++		ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
++		if (ret)
++			goto err;
++		for (i = 0; i < num_events; i++) {
++			counter.ind       = i;
++			counter.count     = (uint64_t)counter_config[i].count;
++			counter.enabled   = (uint32_t)counter_config[i].enabled;
++			counter.event     = (uint32_t)counter_config[i].event;
++			counter.kernel    = (uint32_t)counter_config[i].kernel;
++			counter.user      = (uint32_t)counter_config[i].user;
++			counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
++			HYPERVISOR_xenoprof_op(XENOPROF_counter, 
++					       &counter);
 +		}
-+	}
++		ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
 +
-+	switch(device) {
-+	case PCI_DEVICE_ID_VIA_82C586_0:
-+		r->name = "VIA";
-+		r->get = pirq_via586_get;
-+		r->set = pirq_via586_set;
-+		return 1;
-+	case PCI_DEVICE_ID_VIA_82C596:
-+	case PCI_DEVICE_ID_VIA_82C686:
-+	case PCI_DEVICE_ID_VIA_8231:
-+	case PCI_DEVICE_ID_VIA_8233A:
-+	case PCI_DEVICE_ID_VIA_8235:
-+	case PCI_DEVICE_ID_VIA_8237:
-+		/* FIXME: add new ones for 8233/5 */
-+		r->name = "VIA";
-+		r->get = pirq_via_get;
-+		r->set = pirq_via_set;
-+		return 1;
++		if (ret)
++			goto err;
 +	}
-+	return 0;
-+}
 +
-+static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_VLSI_82C534:
-+			r->name = "VLSI 82C534";
-+			r->get = pirq_vlsi_get;
-+			r->set = pirq_vlsi_set;
-+			return 1;
-+	}
++	ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
++	if (ret)
++		goto err;
++
++	xenoprof_enabled = 1;
 +	return 0;
++ err:
++	unbind_virq();
++	return ret;
 +}
 +
 +
-+static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++static void xenoprof_shutdown(void)
 +{
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_SERVERWORKS_OSB4:
-+		case PCI_DEVICE_ID_SERVERWORKS_CSB5:
-+			r->name = "ServerWorks";
-+			r->get = pirq_serverworks_get;
-+			r->set = pirq_serverworks_set;
-+			return 1;
-+	}
-+	return 0;
-+}
++	xenoprof_enabled = 0;
 +
-+static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	if (device != PCI_DEVICE_ID_SI_503)
-+		return 0;
-+		
-+	r->name = "SIS";
-+	r->get = pirq_sis_get;
-+	r->set = pirq_sis_set;
-+	return 1;
-+}
++	HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);
 +
-+static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_CYRIX_5520:
-+			r->name = "NatSemi";
-+			r->get = pirq_cyrix_get;
-+			r->set = pirq_cyrix_set;
-+			return 1;
++	if (is_primary) {
++		HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);
++		active_defined = 0;
 +	}
-+	return 0;
-+}
 +
-+static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_OPTI_82C700:
-+			r->name = "OPTI";
-+			r->get = pirq_opti_get;
-+			r->set = pirq_opti_set;
-+			return 1;
-+	}
-+	return 0;
-+}
++	unbind_virq();
 +
-+static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_ITE_IT8330G_0:
-+			r->name = "ITE";
-+			r->get = pirq_ite_get;
-+			r->set = pirq_ite_set;
-+			return 1;
-+	}
-+	return 0;
 +}
 +
-+static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++
++static int xenoprof_start(void)
 +{
-+	switch(device)
-+	{
-+	case PCI_DEVICE_ID_AL_M1533:
-+	case PCI_DEVICE_ID_AL_M1563:
-+		printk(KERN_DEBUG "PCI: Using ALI IRQ Router\n");
-+		r->name = "ALI";
-+		r->get = pirq_ali_get;
-+		r->set = pirq_ali_set;
-+		return 1;
-+	}
-+	return 0;
++	int ret = 0;
++
++	if (is_primary)
++		ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
++
++	return ret;
 +}
 +
-+static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++
++static void xenoprof_stop(void)
 +{
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_AMD_VIPER_740B:
-+			r->name = "AMD756";
-+			break;
-+		case PCI_DEVICE_ID_AMD_VIPER_7413:
-+			r->name = "AMD766";
-+			break;
-+		case PCI_DEVICE_ID_AMD_VIPER_7443:
-+			r->name = "AMD768";
-+			break;
-+		default:
-+			return 0;
-+	}
-+	r->get = pirq_amd756_get;
-+	r->set = pirq_amd756_set;
-+	return 1;
++	if (is_primary)
++		HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
 +}
-+		
-+static __initdata struct irq_router_handler pirq_routers[] = {
-+	{ PCI_VENDOR_ID_INTEL, intel_router_probe },
-+	{ PCI_VENDOR_ID_AL, ali_router_probe },
-+	{ PCI_VENDOR_ID_ITE, ite_router_probe },
-+	{ PCI_VENDOR_ID_VIA, via_router_probe },
-+	{ PCI_VENDOR_ID_OPTI, opti_router_probe },
-+	{ PCI_VENDOR_ID_SI, sis_router_probe },
-+	{ PCI_VENDOR_ID_CYRIX, cyrix_router_probe },
-+	{ PCI_VENDOR_ID_VLSI, vlsi_router_probe },
-+	{ PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe },
-+	{ PCI_VENDOR_ID_AMD, amd_router_probe },
-+	/* Someone with docs needs to add the ATI Radeon IGP */
-+	{ 0, NULL }
-+};
-+static struct irq_router pirq_router;
-+static struct pci_dev *pirq_router_dev;
 +
 +
-+/*
-+ *	FIXME: should we have an option to say "generic for
-+ *	chipset" ?
-+ */
-+ 
-+static void __init pirq_find_router(struct irq_router *r)
++static int xenoprof_set_active(int *active_domains,
++			       unsigned int adomains)
 +{
-+	struct irq_routing_table *rt = pirq_table;
-+	struct irq_router_handler *h;
++	int ret = 0;
++	int i;
++	int set_dom0 = 0;
++	domid_t domid;
 +
-+#ifdef CONFIG_PCI_BIOS
-+	if (!rt->signature) {
-+		printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
-+		r->set = pirq_bios_set;
-+		r->name = "BIOS";
-+		return;
-+	}
-+#endif
++	if (!is_primary)
++		return 0;
 +
-+	/* Default unless a driver reloads it */
-+	r->name = "default";
-+	r->get = NULL;
-+	r->set = NULL;
-+	
-+	DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
-+	    rt->rtr_vendor, rt->rtr_device);
++	if (adomains > MAX_OPROF_DOMAINS)
++		return -E2BIG;
 +
-+	pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
-+	if (!pirq_router_dev) {
-+		DBG(KERN_DEBUG "PCI: Interrupt router not found at "
-+			"%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
-+		return;
-+	}
++	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
++	if (ret)
++		return ret;
 +
-+	for( h = pirq_routers; h->vendor; h++) {
-+		/* First look for a router match */
-+		if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device))
-+			break;
-+		/* Fall back to a device match */
-+		if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device))
-+			break;
++	for (i = 0; i < adomains; i++) {
++		domid = active_domains[i];
++		if (domid != active_domains[i]) {
++			ret = -EINVAL;
++			goto out;
++		}
++		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
++		if (ret)
++			goto out;
++		if (active_domains[i] == 0)
++			set_dom0 = 1;
++	}
++	/* dom0 must always be active but may not be in the list */ 
++	if (!set_dom0) {
++		domid = 0;
++		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
 +	}
-+	printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
-+		pirq_router.name,
-+		pirq_router_dev->vendor,
-+		pirq_router_dev->device,
-+		pci_name(pirq_router_dev));
-+}
-+
-+static struct irq_info *pirq_get_info(struct pci_dev *dev)
-+{
-+	struct irq_routing_table *rt = pirq_table;
-+	int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
-+	struct irq_info *info;
 +
-+	for (info = rt->slots; entries--; info++)
-+		if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
-+			return info;
-+	return NULL;
++out:
++	if (ret)
++		HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
++	active_defined = !ret;
++	return ret;
 +}
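
One subtlety above: domid_t is narrower than int, so the assign-then-compare
pair (paraphrased here; the real code routes the failure through its out:
label)

	domid = active_domains[i];
	if (domid != active_domains[i])
		return -EINVAL;	/* would have been silently truncated */

deliberately rejects caller-supplied domain IDs that do not fit in domid_t.
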
 +
-+static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
++static int xenoprof_set_passive(int *p_domains,
++                                unsigned int pdoms)
 +{
-+	u8 pin;
-+	struct irq_info *info;
-+	int i, pirq, newirq;
-+	int irq = 0;
-+	u32 mask;
-+	struct irq_router *r = &pirq_router;
-+	struct pci_dev *dev2 = NULL;
-+	char *msg = NULL;
-+
-+	/* Find IRQ pin */
-+	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-+	if (!pin) {
-+		DBG(KERN_DEBUG " -> no interrupt pin\n");
-+		return 0;
-+	}
-+	pin = pin - 1;
++	int ret;
++	int i, j;
++	int npages;
++	struct xenoprof_buf *buf;
++	struct vm_struct *area;
++	pgprot_t prot = __pgprot(_KERNPG_TABLE);
 +
-+	/* Find IRQ routing entry */
++	if (!is_primary)
++		return 0;
 +
-+	if (!pirq_table)
-+		return 0;
-+	
-+	DBG(KERN_DEBUG "IRQ for %s[%c]", pci_name(dev), 'A' + pin);
-+	info = pirq_get_info(dev);
-+	if (!info) {
-+		DBG(" -> not found in routing table\n" KERN_DEBUG);
-+		return 0;
-+	}
-+	pirq = info->irq[pin].link;
-+	mask = info->irq[pin].bitmap;
-+	if (!pirq) {
-+		DBG(" -> not routed\n" KERN_DEBUG);
-+		return 0;
-+	}
-+	DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
-+	mask &= pcibios_irq_mask;
++	if (pdoms > MAX_OPROF_DOMAINS)
++		return -E2BIG;
 +
-+	/* Work around broken HP Pavilion Notebooks which assign USB to
-+	   IRQ 9 even though it is actually wired to IRQ 11 */
++	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
++	if (ret)
++		return ret;
 +
-+	if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) {
-+		dev->irq = 11;
-+		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
-+		r->set(pirq_router_dev, dev, pirq, 11);
-+	}
++	for (i = 0; i < pdoms; i++) {
++		passive_domains[i].domain_id = p_domains[i];
++		passive_domains[i].max_samples = 2048;
++		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive,
++					     &passive_domains[i]);
++		if (ret)
++			goto out;
 +
-+	/* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
-+	if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) {
-+		pirq = 0x68;
-+		mask = 0x400;
-+		dev->irq = r->get(pirq_router_dev, dev, pirq);
-+		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
-+	}
++		npages = (passive_domains[i].bufsize * passive_domains[i].nbuf - 1) / PAGE_SIZE + 1;
 +
-+	/*
-+	 * Find the best IRQ to assign: use the one
-+	 * reported by the device if possible.
-+	 */
-+	newirq = dev->irq;
-+	if (newirq && !((1 << newirq) & mask)) {
-+		if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
-+		else printk("\n" KERN_WARNING
-+			"PCI: IRQ %i for device %s doesn't match PIRQ mask "
-+			"- try pci=usepirqmask\n" KERN_DEBUG, newirq,
-+			pci_name(dev));
-+	}
-+	if (!newirq && assign) {
-+		for (i = 0; i < 16; i++) {
-+			if (!(mask & (1 << i)))
-+				continue;
-+			if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, IRQF_SHARED))
-+				newirq = i;
++		area = alloc_vm_area(npages * PAGE_SIZE);
++		if (area == NULL) {
++			ret = -ENOMEM;
++			goto out;
 +		}
-+	}
-+	DBG(" -> newirq=%d", newirq);
 +
-+	/* Check if it is hardcoded */
-+	if ((pirq & 0xf0) == 0xf0) {
-+		irq = pirq & 0xf;
-+		DBG(" -> hardcoded IRQ %d\n", irq);
-+		msg = "Hardcoded";
-+	} else if ( r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
-+	((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask)) ) {
-+		DBG(" -> got IRQ %d\n", irq);
-+		msg = "Found";
-+		eisa_set_level_irq(irq);
-+	} else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
-+		DBG(" -> assigning IRQ %d", newirq);
-+		if (r->set(pirq_router_dev, dev, pirq, newirq)) {
-+			eisa_set_level_irq(newirq);
-+			DBG(" ... OK\n");
-+			msg = "Assigned";
-+			irq = newirq;
++		ret = direct_kernel_remap_pfn_range(
++			(unsigned long)area->addr,
++			passive_domains[i].buf_maddr >> PAGE_SHIFT,
++			npages * PAGE_SIZE, prot, DOMID_SELF);
++		if (ret) {
++			vunmap(area->addr);
++			goto out;
 +		}
-+	}
 +
-+	if (!irq) {
-+		DBG(" ... failed\n");
-+		if (newirq && mask == (1 << newirq)) {
-+			msg = "Guessed";
-+			irq = newirq;
-+		} else
-+			return 0;
-+	}
-+	printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev));
++		p_shared_buffer[i] = area->addr;
 +
-+	/* Update IRQ for all devices with the same pirq value */
-+	while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
-+		pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
-+		if (!pin)
-+			continue;
-+		pin--;
-+		info = pirq_get_info(dev2);
-+		if (!info)
-+			continue;
-+		if (info->irq[pin].link == pirq) {
-+			/* We refuse to override the dev->irq information. Give a warning! */
-+		    	if ( dev2->irq && dev2->irq != irq && \
-+			(!(pci_probe & PCI_USE_PIRQ_MASK) || \
-+			((1 << dev2->irq) & mask)) ) {
-+#ifndef CONFIG_PCI_MSI
-+		    		printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
-+				       pci_name(dev2), dev2->irq, irq);
-+#endif
-+		    		continue;
-+		    	}
-+			dev2->irq = irq;
-+			pirq_penalty[irq]++;
-+			if (dev != dev2)
-+				printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2));
++		for (j = 0; j < passive_domains[i].nbuf; j++) {
++			buf = (struct xenoprof_buf *)
++				&p_shared_buffer[i][j * passive_domains[i].bufsize];
++			BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
++			p_xenoprof_buf[i][buf->vcpu_id] = buf;
 +		}
-+	}
-+	return 1;
-+}
-+
-+static void __init pcibios_fixup_irqs(void)
-+{
-+	struct pci_dev *dev = NULL;
-+	u8 pin;
-+
-+	DBG(KERN_DEBUG "PCI: IRQ fixup\n");
-+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-+		/*
-+		 * If the BIOS has set an out of range IRQ number, just ignore it.
-+		 * Also keep track of which IRQ's are already in use.
-+		 */
-+		if (dev->irq >= 16) {
-+			DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
-+			dev->irq = 0;
-+		}
-+		/* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
-+		if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000)
-+			pirq_penalty[dev->irq] = 0;
-+		pirq_penalty[dev->irq]++;
-+	}
-+
-+	dev = NULL;
-+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-+		pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-+#ifdef CONFIG_X86_IO_APIC
-+		/*
-+		 * Recalculate IRQ numbers if we use the I/O APIC.
-+		 */
-+		if (io_apic_assign_pci_irqs)
-+		{
-+			int irq;
 +
-+			if (pin) {
-+				pin--;		/* interrupt pins are numbered starting from 1 */
-+				irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
-+	/*
-+	 * Busses behind bridges are typically not listed in the MP-table.
-+	 * In this case we have to look up the IRQ based on the parent bus,
-+	 * parent slot, and pin number. The SMP code detects such bridged
-+	 * busses itself so we should get into this branch reliably.
-+	 */
-+				if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
-+					struct pci_dev * bridge = dev->bus->self;
-+
-+					pin = (pin + PCI_SLOT(dev->devfn)) % 4;
-+					irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 
-+							PCI_SLOT(bridge->devfn), pin);
-+					if (irq >= 0)
-+						printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
-+							pci_name(bridge), 'A' + pin, irq);
-+				}
-+				if (irq >= 0) {
-+					if (use_pci_vector() &&
-+						!platform_legacy_irq(irq))
-+						irq = IO_APIC_VECTOR(irq);
-+
-+					printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
-+						pci_name(dev), 'A' + pin, irq);
-+					dev->irq = irq;
-+				}
-+			}
-+		}
-+#endif
-+		/*
-+		 * Still no IRQ? Try to lookup one...
-+		 */
-+		if (pin && !dev->irq)
-+			pcibios_lookup_irq(dev, 0);
 +	}
-+}
 +
-+/*
-+ * Work around broken HP Pavilion Notebooks which assign USB to
-+ * IRQ 9 even though it is actually wired to IRQ 11
-+ */
-+static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d)
-+{
-+	if (!broken_hp_bios_irq9) {
-+		broken_hp_bios_irq9 = 1;
-+		printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
-+	}
++	pdomains = pdoms;
 +	return 0;
-+}
 +
-+/*
-+ * Work around broken Acer TravelMate 360 Notebooks which assign
-+ * Cardbus to IRQ 11 even though it is actually wired to IRQ 10
-+ */
-+static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d)
-+{
-+	if (!acer_tm360_irqrouting) {
-+		acer_tm360_irqrouting = 1;
-+		printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
++out:
++	for (j = 0; j < i; j++) {
++		vunmap(p_shared_buffer[j]);
++		p_shared_buffer[j] = NULL;
 +	}
-+	return 0;
++
++ 	return ret;
 +}
 +
-+static struct dmi_system_id __initdata pciirq_dmi_table[] = {
-+	{
-+		.callback = fix_broken_hp_bios_irq9,
-+		.ident = "HP Pavilion N5400 Series Laptop",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-+			DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
-+			DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"),
-+			DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
-+		},
-+	},
-+	{
-+		.callback = fix_acer_tm360_irqrouting,
-+		.ident = "Acer TravelMate 36x Laptop",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
-+		},
-+	},
-+	{ }
-+};
++struct op_counter_config counter_config[OP_MAX_COUNTER];
 +
-+static int __init pcibios_irq_init(void)
++static int xenoprof_create_files(struct super_block *sb, struct dentry *root)
 +{
-+	DBG(KERN_DEBUG "PCI: IRQ init\n");
-+
-+	if (pcibios_enable_irq || raw_pci_ops == NULL)
-+		return 0;
-+
-+	dmi_check_system(pciirq_dmi_table);
-+
-+	pirq_table = pirq_find_routing_table();
++	unsigned int i;
 +
-+#ifdef CONFIG_PCI_BIOS
-+	if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
-+		pirq_table = pcibios_get_irq_routing_table();
-+#endif
-+	if (pirq_table) {
-+		pirq_peer_trick();
-+		pirq_find_router(&pirq_router);
-+		if (pirq_table->exclusive_irqs) {
-+			int i;
-+			for (i=0; i<16; i++)
-+				if (!(pirq_table->exclusive_irqs & (1 << i)))
-+					pirq_penalty[i] += 100;
-+		}
-+		/* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
-+		if (io_apic_assign_pci_irqs)
-+			pirq_table = NULL;
++	for (i = 0; i < num_events; ++i) {
++		struct dentry *dir;
++		char buf[2];
++ 
++		snprintf(buf, 2, "%d", i);
++		dir = oprofilefs_mkdir(sb, root, buf);
++		oprofilefs_create_ulong(sb, dir, "enabled",
++					&counter_config[i].enabled);
++		oprofilefs_create_ulong(sb, dir, "event",
++					&counter_config[i].event);
++		oprofilefs_create_ulong(sb, dir, "count",
++					&counter_config[i].count);
++		oprofilefs_create_ulong(sb, dir, "unit_mask",
++					&counter_config[i].unit_mask);
++		oprofilefs_create_ulong(sb, dir, "kernel",
++					&counter_config[i].kernel);
++		oprofilefs_create_ulong(sb, dir, "user",
++					&counter_config[i].user);
 +	}
 +
-+	pcibios_enable_irq = pirq_enable_irq;
-+
-+	pcibios_fixup_irqs();
 +	return 0;
 +}
 +
-+subsys_initcall(pcibios_irq_init);
 +
++struct oprofile_operations xenoprof_ops = {
++	.create_files 	= xenoprof_create_files,
++	.set_active	= xenoprof_set_active,
++	.set_passive    = xenoprof_set_passive,
++	.setup 		= xenoprof_setup,
++	.shutdown	= xenoprof_shutdown,
++	.start		= xenoprof_start,
++	.stop		= xenoprof_stop
++};
 +
-+static void pirq_penalize_isa_irq(int irq, int active)
-+{
-+	/*
-+	 *  If any ISAPnP device reports an IRQ in its list of possible
-+	 *  IRQ's, we try to avoid assigning it to PCI devices.
-+	 */
-+	if (irq < 16) {
-+		if (active)
-+			pirq_penalty[irq] += 1000;
-+		else
-+			pirq_penalty[irq] += 100;
-+	}
-+}
 +
-+void pcibios_penalize_isa_irq(int irq, int active)
-+{
-+#ifdef CONFIG_ACPI
-+	if (!acpi_noirq)
-+		acpi_penalize_isa_irq(irq, active);
-+	else
-+#endif
-+		pirq_penalize_isa_irq(irq, active);
-+}
++/* track whether init registered the driverfs hooks so exit only undoes what was set up */
++static int using_xenoprof;
 +
-+static int pirq_enable_irq(struct pci_dev *dev)
++int __init oprofile_arch_init(struct oprofile_operations *ops)
 +{
-+	u8 pin;
-+	struct pci_dev *temp_dev;
++	struct xenoprof_init init;
++	int ret, i;
 +
-+	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-+	if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
-+		char *msg = "";
++	ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
 +
-+		pin--;		/* interrupt pins are numbered starting from 1 */
++	if (!ret) {
++		num_events = init.num_events;
++		is_primary = init.is_primary;
 +
-+		if (io_apic_assign_pci_irqs) {
-+			int irq;
++		/* Just in case: make sure we do not overflow the event list
++		 * (i.e. the counter_config array). */
++		if (num_events > OP_MAX_COUNTER)
++			num_events = OP_MAX_COUNTER;
 +
-+			irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
-+			/*
-+			 * Busses behind bridges are typically not listed in the MP-table.
-+			 * In this case we have to look up the IRQ based on the parent bus,
-+			 * parent slot, and pin number. The SMP code detects such bridged
-+			 * busses itself so we should get into this branch reliably.
-+			 */
-+			temp_dev = dev;
-+			while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
-+				struct pci_dev * bridge = dev->bus->self;
-+
-+				pin = (pin + PCI_SLOT(dev->devfn)) % 4;
-+				irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 
-+						PCI_SLOT(bridge->devfn), pin);
-+				if (irq >= 0)
-+					printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
-+						pci_name(bridge), 'A' + pin, irq);
-+				dev = bridge;
-+			}
-+			dev = temp_dev;
-+			if (irq >= 0) {
-+#ifdef CONFIG_PCI_MSI
-+				if (!platform_legacy_irq(irq))
-+					irq = IO_APIC_VECTOR(irq);
-+#endif
-+				printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
-+					pci_name(dev), 'A' + pin, irq);
-+				dev->irq = irq;
-+				return 0;
-+			} else
-+				msg = " Probably buggy MP table.";
-+		} else if (pci_probe & PCI_BIOS_IRQ_SCAN)
-+			msg = "";
-+		else
-+			msg = " Please try using pci=biosirq.";
++		/*  cpu_type is detected by Xen */
++		cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
++		strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
++		xenoprof_ops.cpu_type = cpu_type;
 +
-+		/* With IDE legacy devices the IRQ lookup failure is not a problem.. */
-+		if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5))
-+			return 0;
++		init_driverfs();
++		using_xenoprof = 1;
++		*ops = xenoprof_ops;
++
++		for (i = 0; i < NR_CPUS; i++)
++			ovf_irq[i] = -1;
 +
-+		printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
-+		       'A' + pin, pci_name(dev), msg);
++		active_defined = 0;
 +	}
-+	return 0;
++	printk(KERN_INFO "oprofile_arch_init: ret %d, events %d, "
++	       "is_primary %d\n", ret, num_events, is_primary);
++	return ret;
 +}
 +
-+int pci_vector_resources(int last, int nr_released)
++
++void __exit oprofile_arch_exit(void)
 +{
-+	int count = nr_released;
++	int i;
 +
-+	int next = last;
-+	int offset = (last % 8);
++	if (using_xenoprof)
++		exit_driverfs();
 +
-+	while (next < FIRST_SYSTEM_VECTOR) {
-+		next += 8;
-+#ifdef CONFIG_X86_64
-+		if (next == IA32_SYSCALL_VECTOR)
-+			continue;
-+#else
-+		if (next == SYSCALL_VECTOR)
-+			continue;
-+#endif
-+		count++;
-+		if (next >= FIRST_SYSTEM_VECTOR) {
-+			if (offset%8) {
-+				next = FIRST_DEVICE_VECTOR + offset;
-+				offset++;
-+				continue;
-+			}
-+			count--;
-+		}
++	if (shared_buffer) {
++		vunmap(shared_buffer);
++		shared_buffer = NULL;
 +	}
++	if (is_primary) {
++		for (i = 0; i < pdomains; i++)
++			if (p_shared_buffer[i]) {
++				vunmap(p_shared_buffer[i]);
++				p_shared_buffer[i] = NULL;
++			}
++		HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);
++	}
 +
-+	return count;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/pci/Makefile linux-2.6.18-xen/arch/i386/pci/Makefile
---- linux-2.6.18.3/arch/i386/pci/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/pci/Makefile	2006-11-19 14:26:22.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
+--- a/arch/i386/pci/irq.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/pci/irq.c	2007-03-14 10:55:14.000000000 +0100
+@@ -94,13 +94,25 @@
+ 	u8 *addr;
+ 	struct irq_routing_table *rt;
+ 
++#ifdef CONFIG_XEN
++	if (!is_initial_xendomain())
++		return NULL;
++#endif
+ 	if (pirq_table_addr) {
++#ifdef CONFIG_XEN
++		rt = pirq_check_routing_table((u8 *) isa_bus_to_virt(pirq_table_addr));
++#else
+ 		rt = pirq_check_routing_table((u8 *) __va(pirq_table_addr));
++#endif
+ 		if (rt)
+ 			return rt;
+ 		printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n");
+ 	}
++#ifdef CONFIG_XEN
++	for(addr = (u8 *) isa_bus_to_virt(0xf0000); addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
++#else
+ 	for(addr = (u8 *) __va(0xf0000); addr < (u8 *) __va(0x100000); addr += 16) {
++#endif
+ 		rt = pirq_check_routing_table(addr);
+ 		if (rt)
+ 			return rt;
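
The CONFIG_XEN arms of this hunk replace __va() with isa_bus_to_virt()
because dom0 runs on pseudo-physical memory: __va(0xf0000) would land in the
domain's own RAM, whereas isa_bus_to_virt() goes through the mapping
provided for the machine's real ISA/BIOS region, which is where the $PIR
routing table actually lives. The is_initial_xendomain() guard added at the
top of the function keeps unprivileged domains, which see no such region,
from scanning garbage.
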
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/pci/Makefile b/arch/i386/pci/Makefile
+--- a/arch/i386/pci/Makefile	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/pci/Makefile	2007-03-14 10:55:14.000000000 +0100
 @@ -4,6 +4,10 @@
  obj-$(CONFIG_PCI_MMCONFIG)	+= mmconfig.o direct.o
  obj-$(CONFIG_PCI_DIRECT)	+= direct.o
@@ -21703,15 +17728,15 @@
 @@ -12,3 +16,8 @@
  pci-$(CONFIG_X86_NUMAQ)		:= numa.o irq.o
  
- obj-y				+= $(pci-y) common.o
+ obj-y				+= $(pci-y) common.o early.o
 +
 +ifdef CONFIG_XEN
 +include $(srctree)/scripts/Makefile.xen
 +obj-y := $(call cherrypickxen, $(obj-y))
 +endif
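
cherrypickxen comes from scripts/Makefile.xen, added elsewhere in this patch
series; as used here it presumably substitutes foo-xen.o for foo.o wherever
a Xen-specific variant exists, so the Xen build picks up the forked
implementations without maintaining a second object list.
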
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/pci/pcifront.c linux-2.6.18-xen/arch/i386/pci/pcifront.c
---- linux-2.6.18.3/arch/i386/pci/pcifront.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/i386/pci/pcifront.c	2006-11-19 14:26:22.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/pci/pcifront.c b/arch/i386/pci/pcifront.c
+--- a/arch/i386/pci/pcifront.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/i386/pci/pcifront.c	2007-03-14 10:55:14.000000000 +0100
 @@ -0,0 +1,55 @@
 +/*
 + * PCI Frontend Stub - puts some "dummy" functions in to the Linux x86 PCI core
@@ -21768,18 +17793,18 @@
 +}
 +
 +arch_initcall(pcifront_x86_stub_init);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/i386/power/Makefile linux-2.6.18-xen/arch/i386/power/Makefile
---- linux-2.6.18.3/arch/i386/power/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/i386/power/Makefile	2006-11-19 14:26:22.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/i386/power/Makefile b/arch/i386/power/Makefile
+--- a/arch/i386/power/Makefile	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/i386/power/Makefile	2007-03-14 10:55:14.000000000 +0100
 @@ -1,2 +1,4 @@
 -obj-$(CONFIG_PM)		+= cpu.o
 +obj-$(CONFIG_PM_LEGACY)		+= cpu.o
 +obj-$(CONFIG_SOFTWARE_SUSPEND)	+= cpu.o
 +obj-$(CONFIG_ACPI_SLEEP)	+= cpu.o
- obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/dig/setup.c linux-2.6.18-xen/arch/ia64/dig/setup.c
---- linux-2.6.18.3/arch/ia64/dig/setup.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/dig/setup.c	2006-11-19 14:26:22.000000000 +0100
+ obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o suspend.o
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/dig/setup.c b/arch/ia64/dig/setup.c
+--- a/arch/ia64/dig/setup.c	2007-03-12 21:58:05.000000000 +0100
++++ b/arch/ia64/dig/setup.c	2007-03-14 10:55:14.000000000 +0100
 @@ -24,6 +24,8 @@
  #include <asm/machvec.h>
  #include <asm/system.h>
@@ -21809,19 +17834,19 @@
 +	xen_start_info->console.domU.evtchn = 0;
 +#endif
  }
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/hp/sim/Makefile linux-2.6.18-xen/arch/ia64/hp/sim/Makefile
---- linux-2.6.18.3/arch/ia64/hp/sim/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/hp/sim/Makefile	2006-11-19 14:26:22.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/hp/sim/Makefile b/arch/ia64/hp/sim/Makefile
+--- a/arch/ia64/hp/sim/Makefile	2007-03-12 21:58:05.000000000 +0100
++++ b/arch/ia64/hp/sim/Makefile	2007-03-14 10:55:14.000000000 +0100
 @@ -14,3 +14,5 @@
  obj-$(CONFIG_HP_SIMSERIAL) += simserial.o
  obj-$(CONFIG_HP_SIMSERIAL_CONSOLE) += hpsim_console.o
  obj-$(CONFIG_HP_SIMSCSI) += simscsi.o
 +obj-$(CONFIG_XEN) += simserial.o
 +obj-$(CONFIG_XEN) += hpsim_console.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/Kconfig linux-2.6.18-xen/arch/ia64/Kconfig
---- linux-2.6.18.3/arch/ia64/Kconfig	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/Kconfig	2006-11-19 14:26:22.000000000 +0100
-@@ -58,6 +58,20 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/Kconfig b/arch/ia64/Kconfig
+--- a/arch/ia64/Kconfig	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/ia64/Kconfig	2007-03-14 10:55:14.000000000 +0100
+@@ -67,6 +67,20 @@
  	bool
  	default y
  
@@ -21842,7 +17867,7 @@
  config SCHED_NO_NO_OMIT_FRAME_POINTER
  	bool
  	default y
-@@ -465,6 +479,21 @@
+@@ -500,6 +514,21 @@
  	bool
  	default PCI
  
@@ -21864,7 +17889,7 @@
  source "drivers/pci/pcie/Kconfig"
  
  source "drivers/pci/Kconfig"
-@@ -528,3 +557,34 @@
+@@ -572,3 +601,34 @@
  source "security/Kconfig"
  
  source "crypto/Kconfig"
@@ -21899,9 +17924,9 @@
 +endif
 +
 +source "drivers/xen/Kconfig"
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/kernel/asm-offsets.c linux-2.6.18-xen/arch/ia64/kernel/asm-offsets.c
---- linux-2.6.18.3/arch/ia64/kernel/asm-offsets.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/kernel/asm-offsets.c	2006-11-19 14:26:22.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
+--- a/arch/ia64/kernel/asm-offsets.c	2007-03-15 15:56:05.000000000 +0100
++++ b/arch/ia64/kernel/asm-offsets.c	2007-03-14 10:55:14.000000000 +0100
 @@ -268,4 +268,29 @@
  	DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
  	DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
@@ -21932,9 +17957,9 @@
 +	DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
 +#endif /* CONFIG_XEN */
  }
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/kernel/entry.S linux-2.6.18-xen/arch/ia64/kernel/entry.S
---- linux-2.6.18.3/arch/ia64/kernel/entry.S	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/kernel/entry.S	2006-11-19 14:26:22.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
+--- a/arch/ia64/kernel/entry.S	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/ia64/kernel/entry.S	2007-03-14 10:55:14.000000000 +0100
 @@ -180,7 +180,7 @@
   *	called.  The code starting at .map relies on this.  The rest of the code
   *	doesn't care about the interrupt masking status.
@@ -22054,9 +18079,9 @@
  	;;
  	mov ar.unat=r9
  	br.many b7
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/kernel/gate.lds.S linux-2.6.18-xen/arch/ia64/kernel/gate.lds.S
---- linux-2.6.18.3/arch/ia64/kernel/gate.lds.S	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/kernel/gate.lds.S	2006-11-19 14:26:22.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
+--- a/arch/ia64/kernel/gate.lds.S	2007-03-12 21:58:05.000000000 +0100
++++ b/arch/ia64/kernel/gate.lds.S	2007-03-14 10:55:14.000000000 +0100
 @@ -43,6 +43,28 @@
  				    __start_gate_brl_fsys_bubble_down_patchlist = .;
  				    *(.data.patch.brl_fsys_bubble_down)
@@ -22086,9 +18111,9 @@
    }									:readable
    .IA_64.unwind_info		: { *(.IA_64.unwind_info*) }
    .IA_64.unwind			: { *(.IA_64.unwind*) }			:readable :unwind
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/kernel/gate.S linux-2.6.18-xen/arch/ia64/kernel/gate.S
---- linux-2.6.18.3/arch/ia64/kernel/gate.S	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/kernel/gate.S	2006-11-19 14:26:22.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
+--- a/arch/ia64/kernel/gate.S	2007-03-12 21:58:05.000000000 +0100
++++ b/arch/ia64/kernel/gate.S	2007-03-14 10:55:14.000000000 +0100
 @@ -6,13 +6,15 @@
   * 	David Mosberger-Tang <davidm at hpl.hp.com>
   */
@@ -22258,9 +18283,9 @@
  (p9)	mov r8=ENOSYS
  	FSYS_RETURN
  END(__kernel_syscall_via_epc)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/kernel/head.S linux-2.6.18-xen/arch/ia64/kernel/head.S
---- linux-2.6.18.3/arch/ia64/kernel/head.S	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/kernel/head.S	2006-11-19 14:26:22.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
+--- a/arch/ia64/kernel/head.S	2007-03-12 21:58:05.000000000 +0100
++++ b/arch/ia64/kernel/head.S	2007-03-14 10:55:14.000000000 +0100
 @@ -367,6 +367,12 @@
  	;;
  (isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
@@ -22274,9 +18299,9 @@
  #ifdef CONFIG_SMP
  (isAP)	br.call.sptk.many rp=start_secondary
  .ret0:
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/kernel/iosapic.c linux-2.6.18-xen/arch/ia64/kernel/iosapic.c
---- linux-2.6.18.3/arch/ia64/kernel/iosapic.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/kernel/iosapic.c	2006-11-19 14:26:22.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
+--- a/arch/ia64/kernel/iosapic.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/ia64/kernel/iosapic.c	2007-03-14 10:55:14.000000000 +0100
 @@ -159,6 +159,65 @@
  static int iosapic_kmalloc_ok;
  static LIST_HEAD(free_rte_list);
@@ -22343,7 +18368,7 @@
  /*
   * Find an IOSAPIC associated with a GSI
   */
-@@ -653,6 +712,9 @@
+@@ -674,6 +733,9 @@
  	iosapic_intr_info[vector].dmode    = delivery;
  	iosapic_intr_info[vector].trigger  = trigger;
  
@@ -22353,7 +18378,7 @@
  	if (trigger == IOSAPIC_EDGE)
  		irq_type = &irq_type_iosapic_edge;
  	else
-@@ -1015,6 +1077,9 @@
+@@ -1036,6 +1098,9 @@
  	}
  
  	pcat_compat = system_pcat_compat;
@@ -22363,20 +18388,21 @@
  	if (pcat_compat) {
  		/*
  		 * Disable the compatibility mode interrupts (8259 style),
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/kernel/irq_ia64.c linux-2.6.18-xen/arch/ia64/kernel/irq_ia64.c
---- linux-2.6.18.3/arch/ia64/kernel/irq_ia64.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/kernel/irq_ia64.c	2006-11-19 14:26:22.000000000 +0100
-@@ -30,6 +30,9 @@
- #include <linux/smp_lock.h>
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
+--- a/arch/ia64/kernel/irq_ia64.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/ia64/kernel/irq_ia64.c	2007-03-14 10:55:14.000000000 +0100
+@@ -31,6 +31,10 @@
  #include <linux/threads.h>
  #include <linux/bitops.h>
+ #include <linux/irq.h>
 +#ifdef CONFIG_XEN
 +#include <linux/cpu.h>
 +#endif
++
  
  #include <asm/delay.h>
  #include <asm/intrinsics.h>
-@@ -69,6 +72,13 @@
+@@ -70,6 +74,13 @@
  assign_irq_vector (int irq)
  {
  	int pos, vector;
@@ -22390,7 +18416,7 @@
   again:
  	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
  	vector = IA64_FIRST_DEVICE_VECTOR + pos;
-@@ -240,14 +250,215 @@
+@@ -280,14 +291,215 @@
  };
  #endif
  
@@ -22606,7 +18632,7 @@
  			desc = irq_desc + irq;
  			desc->status |= IRQ_PER_CPU;
  			desc->chip = &irq_type_ia64_lsapic;
-@@ -259,6 +470,21 @@
+@@ -299,6 +511,21 @@
  void __init
  init_IRQ (void)
  {
@@ -22628,7 +18654,7 @@
  	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
  #ifdef CONFIG_SMP
  	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
-@@ -276,6 +502,39 @@
+@@ -317,6 +544,39 @@
  	unsigned long ipi_data;
  	unsigned long phys_cpu_id;
  
@@ -22668,9 +18694,9 @@
  #ifdef CONFIG_SMP
  	phys_cpu_id = cpu_physical_id(cpu);
  #else
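
In the assign_irq_vector() hunk earlier in this file's diff, Xen builds short-circuit dynamic IRQs, while the native path still reserves a vector by scanning ia64_vector_mask for the first free bit above IA64_FIRST_DEVICE_VECTOR. A self-contained sketch of that kind of bitmap allocator; the constants and helper names are ours for illustration, not the kernel's:

#include <limits.h>
#include <stdio.h>

#define FIRST_DEVICE_VECTOR	0x30	/* illustrative, not the ia64 value */
#define NUM_DEVICE_VECTORS	64

#define BITS_PER_WORD		(sizeof(unsigned long) * CHAR_BIT)

static unsigned long vector_mask[(NUM_DEVICE_VECTORS + BITS_PER_WORD - 1)
				 / BITS_PER_WORD];

/* Set bit 'pos' and report whether it was already set. */
static int test_and_set(unsigned pos)
{
	unsigned long *w = &vector_mask[pos / BITS_PER_WORD];
	unsigned long bit = 1UL << (pos % BITS_PER_WORD);
	int old = (*w & bit) != 0;

	*w |= bit;
	return old;
}

/* Hand out the first free device vector, or -1 when all are taken. */
static int assign_vector(void)
{
	unsigned pos;

	for (pos = 0; pos < NUM_DEVICE_VECTORS; pos++)
		if (!test_and_set(pos))
			return FIRST_DEVICE_VECTOR + pos;
	return -1;
}

int main(void)
{
	printf("0x%x 0x%x\n", assign_vector(), assign_vector()); /* 0x30 0x31 */
	return 0;
}
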
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/kernel/pal.S linux-2.6.18-xen/arch/ia64/kernel/pal.S
---- linux-2.6.18.3/arch/ia64/kernel/pal.S	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/kernel/pal.S	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
+--- a/arch/ia64/kernel/pal.S	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/ia64/kernel/pal.S	2007-03-14 10:55:14.000000000 +0100
 @@ -16,6 +16,7 @@
  #include <asm/processor.h>
  
@@ -22679,16 +18705,7 @@
  pal_entry_point:
  	data8 ia64_pal_default_handler
  	.text
-@@ -53,7 +54,7 @@
-  * in4	       1 ==> clear psr.ic,  0 ==> don't clear psr.ic
-  *
-  */
--GLOBAL_ENTRY(ia64_pal_call_static)
-+GLOBAL_ENTRY(__ia64_pal_call_static)
- 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
- 	alloc loc1 = ar.pfs,5,5,0,0
- 	movl loc2 = pal_entry_point
-@@ -90,7 +91,7 @@
+@@ -86,7 +87,7 @@
  	;;
 	srlz.d				// serialize restoration of psr.l
  	br.ret.sptk.many b0
@@ -22697,9 +18714,9 @@
  
  /*
   * Make a PAL call using the stacked registers calling convention.
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/kernel/patch.c linux-2.6.18-xen/arch/ia64/kernel/patch.c
---- linux-2.6.18.3/arch/ia64/kernel/patch.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/kernel/patch.c	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
+--- a/arch/ia64/kernel/patch.c	2007-03-12 21:58:05.000000000 +0100
++++ b/arch/ia64/kernel/patch.c	2007-03-14 10:55:14.000000000 +0100
 @@ -184,6 +184,73 @@
  	ia64_srlz_i();
  }
@@ -22785,10 +18802,10 @@
  	ia64_patch_vtop(START(vtop), END(vtop));
  	ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
  }
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/kernel/setup.c linux-2.6.18-xen/arch/ia64/kernel/setup.c
---- linux-2.6.18.3/arch/ia64/kernel/setup.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/kernel/setup.c	2006-11-19 14:26:23.000000000 +0100
-@@ -60,6 +60,10 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
+--- a/arch/ia64/kernel/setup.c	2007-03-16 18:49:20.000000000 +0100
++++ b/arch/ia64/kernel/setup.c	2007-03-14 10:55:14.000000000 +0100
+@@ -61,6 +61,10 @@
  #include <asm/system.h>
  #include <asm/unistd.h>
  #include <asm/system.h>
@@ -22799,7 +18816,7 @@
  
  #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
  # error "struct cpuinfo_ia64 too big!"
-@@ -70,6 +74,22 @@
+@@ -71,6 +75,22 @@
  EXPORT_SYMBOL(__per_cpu_offset);
  #endif
  
@@ -22822,7 +18839,7 @@
  extern void ia64_setup_printk_clock(void);
  
  DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
-@@ -176,15 +196,33 @@
+@@ -177,15 +197,33 @@
  	return 0;
  }
  
@@ -22857,7 +18874,7 @@
  				struct rsvd_region tmp;
  				tmp = rsvd_region[j];
  				rsvd_region[j] = rsvd_region[j + 1];
-@@ -192,6 +230,36 @@
+@@ -193,6 +231,36 @@
  			}
  		}
  	}
@@ -22894,7 +18911,7 @@
  }
  
  /*
-@@ -242,6 +310,14 @@
+@@ -243,6 +311,14 @@
  	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
  	n++;
  
@@ -22909,7 +18926,7 @@
  #ifdef CONFIG_BLK_DEV_INITRD
  	if (ia64_boot_param->initrd_start) {
  		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
-@@ -333,6 +409,16 @@
+@@ -376,6 +452,16 @@
  {
  	int earlycons = 0;
  
@@ -22926,7 +18943,7 @@
  #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
  	{
  		extern int sn_serial_console_early_setup(void);
-@@ -402,6 +488,14 @@
+@@ -460,6 +546,14 @@
  {
  	unw_init();
  
@@ -22941,7 +18958,7 @@
  	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
  
  	*cmdline_p = __va(ia64_boot_param->command_line);
-@@ -478,6 +572,23 @@
+@@ -538,6 +632,23 @@
  			conswitchp = &vga_con;
  # endif
  	}
@@ -22956,16 +18973,16 @@
 +		       xen_start_info->nr_pages, xen_start_info->flags);
 +
 +		if (!is_initial_xendomain()) {
-+			extern int console_use_vt;
++#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
 +			conswitchp = NULL;
-+			console_use_vt = 0;
++#endif
 +		}
 +	}
 +#endif
  #endif
  
  	/* enable IA-64 Machine Check Abort Handling unless disabled */
-@@ -486,6 +597,9 @@
+@@ -546,6 +657,9 @@
  
  	platform_setup(cmdline_p);
  	paging_init();
@@ -22975,7 +18992,7 @@
  }
  
  /*
-@@ -870,6 +984,15 @@
+@@ -951,6 +1065,15 @@
  	/* size of physical stacked register partition plus 8 bytes: */
  	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
  	platform_cpu_init();
@@ -22991,9 +19008,9 @@
  	pm_idle = default_idle;
  }
  
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/Makefile linux-2.6.18-xen/arch/ia64/Makefile
---- linux-2.6.18.3/arch/ia64/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/Makefile	2006-11-19 14:26:22.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/Makefile b/arch/ia64/Makefile
+--- a/arch/ia64/Makefile	2007-03-12 21:58:05.000000000 +0100
++++ b/arch/ia64/Makefile	2007-03-14 10:55:14.000000000 +0100
 @@ -45,6 +45,12 @@
  endif
  
@@ -23034,9 +19051,9 @@
  
  define archhelp
    echo '* compressed	- Build compressed kernel image'
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/mm/ioremap.c linux-2.6.18-xen/arch/ia64/mm/ioremap.c
---- linux-2.6.18.3/arch/ia64/mm/ioremap.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/ia64/mm/ioremap.c	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
+--- a/arch/ia64/mm/ioremap.c	2007-03-12 21:58:05.000000000 +0100
++++ b/arch/ia64/mm/ioremap.c	2007-03-14 10:55:15.000000000 +0100
 @@ -16,6 +16,9 @@
  static inline void __iomem *
  __ioremap (unsigned long offset, unsigned long size)
@@ -23047,15 +19064,15 @@
  	return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset);
  }
  
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/xen/drivers/README linux-2.6.18-xen/arch/ia64/xen/drivers/README
---- linux-2.6.18.3/arch/ia64/xen/drivers/README	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/ia64/xen/drivers/README	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/xen/drivers/README b/arch/ia64/xen/drivers/README
+--- a/arch/ia64/xen/drivers/README	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/ia64/xen/drivers/README	2007-03-14 10:55:15.000000000 +0100
 @@ -0,0 +1,2 @@
 +This is a temporary location for source/Makefiles that need to be
 +patched/reworked in drivers/xen to work with xenlinux/ia64.
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/xen/hypercall.S linux-2.6.18-xen/arch/ia64/xen/hypercall.S
---- linux-2.6.18.3/arch/ia64/xen/hypercall.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/ia64/xen/hypercall.S	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
+--- a/arch/ia64/xen/hypercall.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/ia64/xen/hypercall.S	2007-03-14 10:55:15.000000000 +0100
 @@ -0,0 +1,413 @@
 +/*
 + * Support routines for Xen hypercalls
@@ -23470,9 +19487,9 @@
 +	;; 
 +END(xen_ssm_i_1)
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/xen/hypervisor.c linux-2.6.18-xen/arch/ia64/xen/hypervisor.c
---- linux-2.6.18.3/arch/ia64/xen/hypervisor.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/ia64/xen/hypervisor.c	2006-12-05 18:42:36.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c
+--- a/arch/ia64/xen/hypervisor.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/ia64/xen/hypervisor.c	2007-03-14 10:55:15.000000000 +0100
 @@ -0,0 +1,847 @@
 +/******************************************************************************
 + * arch/ia64/xen/hypervisor.c
@@ -24321,9 +20338,9 @@
 +	/* Just trigger a tick.  */
 +	ia64_cpu_local_tick();
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/xen/Makefile linux-2.6.18-xen/arch/ia64/xen/Makefile
---- linux-2.6.18.3/arch/ia64/xen/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/ia64/xen/Makefile	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile
+--- a/arch/ia64/xen/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/ia64/xen/Makefile	2007-03-14 10:55:15.000000000 +0100
 @@ -0,0 +1,8 @@
 +#
 +# Makefile for Xen components
@@ -24333,9 +20350,9 @@
 +	 hypervisor.o pci-dma-xen.o util.o
 +
 +pci-dma-xen-y := ../../i386/kernel/pci-dma-xen.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/xen/util.c linux-2.6.18-xen/arch/ia64/xen/util.c
---- linux-2.6.18.3/arch/ia64/xen/util.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/ia64/xen/util.c	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/xen/util.c b/arch/ia64/xen/util.c
+--- a/arch/ia64/xen/util.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/ia64/xen/util.c	2007-03-14 10:55:15.000000000 +0100
 @@ -0,0 +1,115 @@
 +/******************************************************************************
 + * arch/ia64/xen/util.c
@@ -24452,9 +20469,9 @@
 + *  tab-width: 8
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/xen/xenentry.S linux-2.6.18-xen/arch/ia64/xen/xenentry.S
---- linux-2.6.18.3/arch/ia64/xen/xenentry.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/ia64/xen/xenentry.S	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/xen/xenentry.S b/arch/ia64/xen/xenentry.S
+--- a/arch/ia64/xen/xenentry.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/ia64/xen/xenentry.S	2007-03-14 10:55:15.000000000 +0100
 @@ -0,0 +1,867 @@
 +/*
 + * ia64/xen/entry.S
@@ -25323,9 +21340,9 @@
 +#else
 +END(ia64_leave_kernel)
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/xen/xenhpski.c linux-2.6.18-xen/arch/ia64/xen/xenhpski.c
---- linux-2.6.18.3/arch/ia64/xen/xenhpski.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/ia64/xen/xenhpski.c	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/xen/xenhpski.c b/arch/ia64/xen/xenhpski.c
+--- a/arch/ia64/xen/xenhpski.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/ia64/xen/xenhpski.c	2007-03-14 10:55:15.000000000 +0100
 @@ -0,0 +1,19 @@
 +
 +extern unsigned long xen_get_cpuid(int);
@@ -25346,9 +21363,9 @@
 +	return 1;
 +}
 +
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/xen/xenivt.S linux-2.6.18-xen/arch/ia64/xen/xenivt.S
---- linux-2.6.18.3/arch/ia64/xen/xenivt.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/ia64/xen/xenivt.S	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S
+--- a/arch/ia64/xen/xenivt.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/ia64/xen/xenivt.S	2007-03-14 10:55:15.000000000 +0100
 @@ -0,0 +1,2169 @@
 +/*
 + * arch/ia64/xen/ivt.S
@@ -27519,9 +23536,9 @@
 +	br.call.sptk.many b6=evtchn_do_upcall
 +END(xen_event_callback)
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/xen/xenminstate.h linux-2.6.18-xen/arch/ia64/xen/xenminstate.h
---- linux-2.6.18.3/arch/ia64/xen/xenminstate.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/ia64/xen/xenminstate.h	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/xen/xenminstate.h b/arch/ia64/xen/xenminstate.h
+--- a/arch/ia64/xen/xenminstate.h	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/ia64/xen/xenminstate.h	2007-03-14 10:55:15.000000000 +0100
 @@ -0,0 +1,368 @@
 +
 +#include <asm/cache.h>
@@ -27891,9 +23908,9 @@
 +#else
 +#define SAVE_MIN		DO_SAVE_MIN(     , mov r30=r0, )
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/xen/xenpal.S linux-2.6.18-xen/arch/ia64/xen/xenpal.S
---- linux-2.6.18.3/arch/ia64/xen/xenpal.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/ia64/xen/xenpal.S	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/xen/xenpal.S b/arch/ia64/xen/xenpal.S
+--- a/arch/ia64/xen/xenpal.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/ia64/xen/xenpal.S	2007-03-14 10:55:15.000000000 +0100
 @@ -0,0 +1,76 @@
 +/*
 + * ia64/xen/xenpal.S
@@ -27971,9 +23988,9 @@
 +	srlz.d				// serialize restoration of psr.l
 +	br.ret.sptk.many b0
 +END(xen_pal_call_static)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/ia64/xen/xensetup.S linux-2.6.18-xen/arch/ia64/xen/xensetup.S
---- linux-2.6.18.3/arch/ia64/xen/xensetup.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/ia64/xen/xensetup.S	2006-11-19 14:26:23.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S
+--- a/arch/ia64/xen/xensetup.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/ia64/xen/xensetup.S	2007-03-14 10:55:15.000000000 +0100
 @@ -0,0 +1,54 @@
 +/*
 + * Support routines for Xen
@@ -28029,9 +24046,9 @@
 +	mov ar.pfs=r20
 +	br.ret.sptk.many b0
 +END(HYPERVISOR_suspend)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/um/kernel/physmem.c linux-2.6.18-xen/arch/um/kernel/physmem.c
---- linux-2.6.18.3/arch/um/kernel/physmem.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/um/kernel/physmem.c	2006-11-19 14:26:32.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
+--- a/arch/um/kernel/physmem.c	2007-03-15 15:56:05.000000000 +0100
++++ b/arch/um/kernel/physmem.c	2007-03-14 10:55:20.000000000 +0100
 @@ -226,7 +226,7 @@
  EXPORT_SYMBOL(physmem_remove_mapping);
  EXPORT_SYMBOL(physmem_subst_mapping);
@@ -28050,9 +24067,9 @@
  }
  
  int is_remapped(void *virt)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/ia32/ia32entry-xen.S linux-2.6.18-xen/arch/x86_64/ia32/ia32entry-xen.S
---- linux-2.6.18.3/arch/x86_64/ia32/ia32entry-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/ia32/ia32entry-xen.S	2006-11-19 14:26:33.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/ia32/ia32entry-xen.S b/arch/x86_64/ia32/ia32entry-xen.S
+--- a/arch/x86_64/ia32/ia32entry-xen.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/ia32/ia32entry-xen.S	2007-03-14 10:55:21.000000000 +0100
 @@ -0,0 +1,743 @@
 +/*
 + * Compatibility mode system call entry point for x86-64. 
@@ -28148,7 +24165,7 @@
 +	CFI_REGISTER	rsp,rbp
 +	__swapgs 
 +	movq	%gs:pda_kernelstack, %rsp
-+	addq	$(PDA_STACKOFFSET),%rsp
++	addq	$(PDA_STACKOFFSET),%rsp	
 +	/*
 +	 * No need to follow this irqs on/off section: the syscall
 +	 * disabled irqs, here we enable it straight after entry:
@@ -28218,7 +24235,7 @@
 +	XEN_UNBLOCK_EVENTS(%r11)		
 +	__sti		/* sti only takes effect after the next instruction */
 +	/* sysexit */
-+	.byte	0xf, 0x35  /* TBD */
++	.byte	0xf, 0x35
 +
 +sysenter_tracesys:
 +	CFI_RESTORE_STATE
@@ -28322,7 +24339,7 @@
 +	movl RSP-ARGOFFSET(%rsp),%esp
 +	CFI_RESTORE rsp
 +	__swapgs
-+	sysretl  /* TBD */
++	sysretl
 +	
 +cstar_tracesys:	
 +	CFI_RESTORE_STATE
@@ -28391,7 +24408,6 @@
 +	pushq %rax
 +	CFI_ADJUST_CFA_OFFSET 8
 +	cld
-+/* 1:	jmp 1b	 */
 +	/* note the registers are not zero extended to the sf.
 +	   this could be a problem. */
 +	SAVE_ARGS 0,0,1
@@ -28786,8 +24802,8 @@
 +	.quad sys_readlinkat		/* 305 */
 +	.quad sys_fchmodat
 +	.quad sys_faccessat
-+	.quad quiet_ni_syscall		/* pselect6 for now */
-+	.quad quiet_ni_syscall		/* ppoll for now */
++	.quad compat_sys_pselect6
++	.quad compat_sys_ppoll
 +	.quad sys_unshare		/* 310 */
 +	.quad compat_sys_set_robust_list
 +	.quad compat_sys_get_robust_list
@@ -28796,11 +24812,32 @@
 +	.quad sys_tee
 +	.quad compat_sys_vmsplice
 +	.quad compat_sys_move_pages
++	.quad sys_getcpu
 +ia32_syscall_end:		
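
The table that ends above is the ia32 compat system-call table: the syscall number a 32-bit task loads into %eax indexes an array of 64-bit handler addresses, and unimplemented slots point at quiet_ni_syscall (this refresh wires up compat_sys_pselect6, compat_sys_ppoll and sys_getcpu in place of former placeholders). A tiny C model of that dispatch scheme, purely illustrative:

#include <stdio.h>

typedef long (*syscall_fn)(long, long, long);

static long sys_getpid(long a, long b, long c)
{
	(void)a; (void)b; (void)c;
	return 42;			/* pretend pid */
}

static long ni_syscall(long a, long b, long c)
{
	(void)a; (void)b; (void)c;
	return -38;			/* -ENOSYS */
}

/* Handler table indexed by syscall number; holes point at ni_syscall. */
static const syscall_fn table[] = { ni_syscall, sys_getpid, ni_syscall };

static long do_syscall(unsigned nr, long a, long b, long c)
{
	if (nr >= sizeof table / sizeof table[0])
		return -38;		/* out-of-range is also -ENOSYS */
	return table[nr](a, b, c);
}

int main(void)
{
	printf("%ld %ld\n", do_syscall(1, 0, 0, 0), do_syscall(99, 0, 0, 0));
	return 0;
}
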
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/ia32/Makefile linux-2.6.18-xen/arch/x86_64/ia32/Makefile
---- linux-2.6.18.3/arch/x86_64/ia32/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/x86_64/ia32/Makefile	2006-11-19 14:26:33.000000000 +0100
-@@ -27,9 +27,25 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/ia32/Makefile b/arch/x86_64/ia32/Makefile
+--- a/arch/x86_64/ia32/Makefile	2007-03-12 21:58:06.000000000 +0100
++++ b/arch/x86_64/ia32/Makefile	2007-03-14 10:55:21.000000000 +0100
+@@ -14,11 +14,19 @@
+ audit-class-$(CONFIG_AUDIT) := audit.o
+ obj-$(CONFIG_IA32_EMULATION) += $(audit-class-y)
+ 
++ifdef CONFIG_XEN
++$(obj)/syscall32_syscall.o: \
++	$(foreach F,int80 sysenter syscall,$(obj)/vsyscall-$F.so)
++
++targets := $(foreach F,int80 sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
++
++else
+ $(obj)/syscall32_syscall.o: \
+ 	$(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so)
+ 
+ # Teach kbuild about targets
+ targets := $(foreach F,sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
++endif
+ 
+ # The DSO images are built using a special linker script
+ quiet_cmd_syscall = SYSCALL $@
+@@ -27,9 +35,20 @@
  			   -Wl,-soname=linux-gate.so.1 -o $@ \
  			   -Wl,-T,$(filter-out FORCE,$^)
  
@@ -28816,141 +24853,38 @@
 +
 +ifdef CONFIG_XEN
 +AFLAGS_vsyscall-int80.o = -m32 -Wa,-32 -Iarch/i386/kernel
-+CFLAGS_syscall32-xen.o += -DUSE_INT80
-+AFLAGS_syscall32_syscall-xen.o += -DUSE_INT80
-+
-+$(obj)/syscall32_syscall-xen.o: \
-+	$(foreach F,int80 sysenter syscall,$(obj)/vsyscall-$F.so)
-+
-+targets := $(foreach F,int80 sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
++CFLAGS_syscall32.o += -DUSE_INT80
++AFLAGS_syscall32_syscall.o += -DUSE_INT80
 +
 +include $(srctree)/scripts/Makefile.xen
 +
 +obj-y := $(call cherrypickxen, $(obj-y))
 +endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/ia32/syscall32_syscall-xen.S linux-2.6.18-xen/arch/x86_64/ia32/syscall32_syscall-xen.S
---- linux-2.6.18.3/arch/x86_64/ia32/syscall32_syscall-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/ia32/syscall32_syscall-xen.S	2006-11-19 14:26:33.000000000 +0100
-@@ -0,0 +1,28 @@
-+/* 32bit VDSOs mapped into user space. */
-+
-+	.section ".init.data","aw"
-+
-+#ifdef USE_INT80
-+
-+	.globl syscall32_int80
-+	.globl syscall32_int80_end
-+
-+syscall32_int80:
-+	.incbin "arch/x86_64/ia32/vsyscall-int80.so"
-+syscall32_int80_end:
-+
-+#endif
-+
-+	.globl syscall32_syscall
-+	.globl syscall32_syscall_end
-+
-+syscall32_syscall:
-+	.incbin "arch/x86_64/ia32/vsyscall-syscall.so"
-+syscall32_syscall_end:
-+
-+	.globl syscall32_sysenter
-+	.globl syscall32_sysenter_end
-+
-+syscall32_sysenter:
-+	.incbin "arch/x86_64/ia32/vsyscall-sysenter.so"
-+syscall32_sysenter_end:
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/ia32/syscall32-xen.c linux-2.6.18-xen/arch/x86_64/ia32/syscall32-xen.c
---- linux-2.6.18.3/arch/x86_64/ia32/syscall32-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/ia32/syscall32-xen.c	2006-11-19 14:26:33.000000000 +0100
-@@ -0,0 +1,128 @@
-+/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
-+
-+/* vsyscall handling for 32bit processes. Map a stub page into it 
-+   on demand because 32bit cannot reach the kernel's fixmaps */
-+
-+#include <linux/mm.h>
-+#include <linux/string.h>
-+#include <linux/kernel.h>
-+#include <linux/gfp.h>
-+#include <linux/init.h>
-+#include <linux/stringify.h>
-+#include <linux/security.h>
-+#include <asm/proto.h>
-+#include <asm/tlbflush.h>
-+#include <asm/ia32_unistd.h>
-+
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
+--- a/arch/x86_64/ia32/syscall32.c	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/ia32/syscall32.c	2007-03-14 10:55:21.000000000 +0100
+@@ -14,12 +14,17 @@
+ #include <asm/tlbflush.h>
+ #include <asm/ia32_unistd.h>
+ 
 +#ifdef USE_INT80
 +extern unsigned char syscall32_int80[], syscall32_int80_end[];
 +#endif
-+extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
-+extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
-+extern int sysctl_vsyscall32;
-+
-+char *syscall32_page; 
+ extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
+ extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
+ extern int sysctl_vsyscall32;
+ 
+ char *syscall32_page; 
 +#ifndef USE_INT80
-+static int use_sysenter = -1;
+ static int use_sysenter = -1;
 +#endif
-+
-+static struct page *
-+syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
-+{
-+	struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
-+	get_page(p);
-+	return p;
-+}
-+
-+/* Prevent VMA merging */
-+static void syscall32_vma_close(struct vm_area_struct *vma)
-+{
-+}
-+
-+static struct vm_operations_struct syscall32_vm_ops = {
-+	.close = syscall32_vma_close,
-+	.nopage = syscall32_nopage,
-+};
-+
-+struct linux_binprm;
-+
-+/* Setup a VMA at program startup for the vsyscall page */
-+int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
-+{
-+	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
-+	struct vm_area_struct *vma;
-+	struct mm_struct *mm = current->mm;
-+	int ret;
-+
-+	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-+	if (!vma)
-+		return -ENOMEM;
-+
-+	memset(vma, 0, sizeof(struct vm_area_struct));
-+	/* Could randomize here */
-+	vma->vm_start = VSYSCALL32_BASE;
-+	vma->vm_end = VSYSCALL32_END;
-+	/* MAYWRITE to allow gdb to COW and set breakpoints */
-+	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
-+	vma->vm_flags |= mm->def_flags;
-+	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
-+	vma->vm_ops = &syscall32_vm_ops;
-+	vma->vm_mm = mm;
-+
-+	down_write(&mm->mmap_sem);
-+	if ((ret = insert_vm_struct(mm, vma))) {
-+		up_write(&mm->mmap_sem);
-+		kmem_cache_free(vm_area_cachep, vma);
-+		return ret;
-+	}
-+	mm->total_vm += npages;
-+	up_write(&mm->mmap_sem);
-+	return 0;
-+}
-+
-+static int __init init_syscall32(void)
-+{ 
-+	syscall32_page = (void *)get_zeroed_page(GFP_KERNEL); 
-+	if (!syscall32_page) 
-+		panic("Cannot allocate syscall32 page"); 
+ 
+ static struct page *
+ syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
+@@ -95,6 +100,14 @@
+ 	syscall32_page = (void *)get_zeroed_page(GFP_KERNEL); 
+ 	if (!syscall32_page) 
+ 		panic("Cannot allocate syscall32 page"); 
 +
 +#ifdef USE_INT80
 +	/*
@@ -28959,42 +24893,62 @@
 +	memcpy(syscall32_page, syscall32_int80,
 +	       syscall32_int80_end - syscall32_int80);
 +#else
-+ 	if (use_sysenter > 0) {
-+ 		memcpy(syscall32_page, syscall32_sysenter,
-+ 		       syscall32_sysenter_end - syscall32_sysenter);
-+ 	} else {
-+  		memcpy(syscall32_page, syscall32_syscall,
-+  		       syscall32_syscall_end - syscall32_syscall);
-+  	}	
+  	if (use_sysenter > 0) {
+  		memcpy(syscall32_page, syscall32_sysenter,
+  		       syscall32_sysenter_end - syscall32_sysenter);
+@@ -102,14 +115,20 @@
+   		memcpy(syscall32_page, syscall32_syscall,
+   		       syscall32_syscall_end - syscall32_syscall);
+   	}	
 +#endif
-+	return 0;
-+} 
+ 	return 0;
+ } 
+-	
+-__initcall(init_syscall32); 
 +
 +/*
 + * This must be done early in case we have an initrd containing 32-bit
 + * binaries (e.g., hotplug). This could be pushed upstream to arch/x86_64.
 + */	
 +core_initcall(init_syscall32); 
-+
-+/* May not be __init: called during resume */
-+void syscall32_cpu_init(void)
-+{
+ 
+ /* May not be __init: called during resume */
+ void syscall32_cpu_init(void)
+ {
 +#ifndef USE_INT80
-+	if (use_sysenter < 0)
-+ 		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
+ 	if (use_sysenter < 0)
+  		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
+ 
+@@ -120,4 +139,5 @@
+ 	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+ 
+ 	wrmsrl(MSR_CSTAR, ia32_cstar_target);
++#endif
+ }
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/ia32/syscall32_syscall.S b/arch/x86_64/ia32/syscall32_syscall.S
+--- a/arch/x86_64/ia32/syscall32_syscall.S	2007-03-12 21:58:06.000000000 +0100
++++ b/arch/x86_64/ia32/syscall32_syscall.S	2007-03-14 10:55:21.000000000 +0100
+@@ -2,6 +2,17 @@
+ 
+ 	.section ".init.data","aw"
+ 
++#ifdef USE_INT80
++
++	.globl syscall32_int80
++	.globl syscall32_int80_end
 +
-+	/* Load these always in case some future AMD CPU supports
-+	   SYSENTER from compat mode too. */
-+	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-+	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
-+	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
++syscall32_int80:
++	.incbin "arch/x86_64/ia32/vsyscall-int80.so"
++syscall32_int80_end:
 +
-+	wrmsrl(MSR_CSTAR, ia32_cstar_target);
 +#endif
-+}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/ia32/vsyscall-int80.S linux-2.6.18-xen/arch/x86_64/ia32/vsyscall-int80.S
---- linux-2.6.18.3/arch/x86_64/ia32/vsyscall-int80.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/ia32/vsyscall-int80.S	2006-11-19 14:26:33.000000000 +0100
++
+ 	.globl syscall32_syscall
+ 	.globl syscall32_syscall_end
+ 
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/ia32/vsyscall-int80.S b/arch/x86_64/ia32/vsyscall-int80.S
+--- a/arch/x86_64/ia32/vsyscall-int80.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/ia32/vsyscall-int80.S	2007-03-14 10:55:21.000000000 +0100
 @@ -0,0 +1,58 @@
 +/*
 + * Code for the vsyscall page.  This version uses the old int $0x80 method.
@@ -29054,9 +25008,9 @@
 + */
 +#define SYSCALL_ENTER_KERNEL    int $0x80
 +#include "vsyscall-sigreturn.S"
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/ia32/vsyscall-sigreturn.S linux-2.6.18-xen/arch/x86_64/ia32/vsyscall-sigreturn.S
---- linux-2.6.18.3/arch/x86_64/ia32/vsyscall-sigreturn.S	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/x86_64/ia32/vsyscall-sigreturn.S	2006-11-19 14:26:33.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/ia32/vsyscall-sigreturn.S b/arch/x86_64/ia32/vsyscall-sigreturn.S
+--- a/arch/x86_64/ia32/vsyscall-sigreturn.S	2007-03-12 21:58:06.000000000 +0100
++++ b/arch/x86_64/ia32/vsyscall-sigreturn.S	2007-03-14 10:55:21.000000000 +0100
 @@ -139,5 +139,5 @@
  	.align 4
  .LENDFDE3:
@@ -29064,10 +25018,10 @@
 -#include "../../i386/kernel/vsyscall-note.S"
 +#include <vsyscall-note.S>
  
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/Kconfig linux-2.6.18-xen/arch/x86_64/Kconfig
---- linux-2.6.18.3/arch/x86_64/Kconfig	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/x86_64/Kconfig	2006-12-05 18:42:36.000000000 +0100
-@@ -30,6 +30,7 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
+--- a/arch/x86_64/Kconfig	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/Kconfig	2007-03-14 10:55:21.000000000 +0100
+@@ -34,6 +34,7 @@
  
  config STACKTRACE_SUPPORT
  	bool
@@ -29075,7 +25029,7 @@
  	default y
  
  config SEMAPHORE_SLEEPERS
-@@ -135,6 +136,22 @@
+@@ -171,6 +172,22 @@
  
  endchoice
  
@@ -29098,7 +25052,7 @@
  #
  # Define implied options from the CPU selection here
  #
-@@ -155,6 +172,7 @@
+@@ -191,6 +208,7 @@
  
  config X86_TSC
  	bool
@@ -29106,7 +25060,7 @@
  	default y
  
  config X86_GOOD_APIC
-@@ -197,7 +215,7 @@
+@@ -239,7 +257,7 @@
  
  config X86_HT
  	bool
@@ -29115,7 +25069,7 @@
  	default y
  
  config MATH_EMULATION
-@@ -211,14 +229,22 @@
+@@ -253,14 +271,22 @@
  
  config X86_IO_APIC
  	bool
@@ -29138,7 +25092,7 @@
  	---help---
  	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
  	  the Memory Type Range Registers (MTRRs) may be used to control
-@@ -259,7 +285,7 @@
+@@ -301,7 +327,7 @@
  
  config SCHED_SMT
  	bool "SMT (Hyperthreading) scheduler support"
@@ -29147,7 +25101,7 @@
  	default n
  	help
  	  SMT scheduler support improves the CPU scheduler's decision making
-@@ -269,7 +295,7 @@
+@@ -311,7 +337,7 @@
  
  config SCHED_MC
  	bool "Multi-core scheduler support"
@@ -29156,7 +25110,7 @@
  	default y
  	help
  	  Multi-core scheduler support improves the CPU scheduler's decision
-@@ -280,7 +306,7 @@
+@@ -322,7 +348,7 @@
  
  config NUMA
         bool "Non Uniform Memory Access (NUMA) Support"
@@ -29165,7 +25119,7 @@
         help
  	 Enable NUMA (Non Uniform Memory Access) support. The kernel 
  	 will try to allocate memory used by a CPU on the local memory 
-@@ -341,7 +367,7 @@
+@@ -378,7 +404,7 @@
  
  config ARCH_SPARSEMEM_ENABLE
  	def_bool y
@@ -29174,7 +25128,7 @@
  
  config ARCH_MEMORY_PROBE
  	def_bool y
-@@ -365,6 +391,7 @@
+@@ -406,6 +432,7 @@
  	int "Maximum number of CPUs (2-256)"
  	range 2 255
  	depends on SMP
@@ -29182,7 +25136,7 @@
  	default "8"
  	help
  	  This allows you to specify the maximum number of CPUs which this
-@@ -387,6 +414,7 @@
+@@ -428,6 +455,7 @@
  
  config HPET_TIMER
  	bool
@@ -29190,7 +25144,7 @@
  	default y
  	help
  	  Use the IA-PC HPET (High Precision Event Timer) to manage
-@@ -407,7 +435,7 @@
+@@ -448,7 +476,7 @@
  	default y
  	select SWIOTLB
  	select AGP
@@ -29199,7 +25153,7 @@
  	help
  	  Support for full DMA access of devices with 32bit memory access only
  	  on systems with more than 3GB. This is usually needed for USB,
-@@ -444,6 +472,7 @@
+@@ -495,6 +523,7 @@
  
  config X86_MCE
  	bool "Machine check support" if EMBEDDED
@@ -29207,16 +25161,15 @@
  	default y
  	help
  	   Include a machine check error handler to report hardware errors.
-@@ -469,7 +498,7 @@
+@@ -520,6 +549,7 @@
  
  config KEXEC
- 	bool "kexec system call (EXPERIMENTAL)"
--	depends on EXPERIMENTAL
-+	depends on EXPERIMENTAL && !X86_64_XEN
+ 	bool "kexec system call"
++	depends on !X86_64_XEN
  	help
  	  kexec is a system call that implements the ability to shutdown your
  	  current kernel, and to start another kernel.  It is like a reboot
-@@ -564,8 +593,11 @@
+@@ -645,8 +675,11 @@
  	default y
  
  menu "Power management options"
@@ -29228,7 +25181,7 @@
  
  source "drivers/acpi/Kconfig"
  
-@@ -588,6 +620,21 @@
+@@ -669,6 +702,21 @@
  	bool "Support mmconfig PCI config space access"
  	depends on PCI && ACPI
  
@@ -29250,25 +25203,25 @@
  source "drivers/pci/pcie/Kconfig"
  
  source "drivers/pci/Kconfig"
-@@ -658,4 +705,6 @@
+@@ -739,4 +787,6 @@
  
  source "crypto/Kconfig"
  
 +source "drivers/xen/Kconfig"
 +
  source "lib/Kconfig"
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/acpi/Makefile linux-2.6.18-xen/arch/x86_64/kernel/acpi/Makefile
---- linux-2.6.18.3/arch/x86_64/kernel/acpi/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/x86_64/kernel/acpi/Makefile	2006-11-19 14:26:33.000000000 +0100
-@@ -7,3 +7,4 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/acpi/Makefile b/arch/x86_64/kernel/acpi/Makefile
+--- a/arch/x86_64/kernel/acpi/Makefile	2007-03-12 21:58:06.000000000 +0100
++++ b/arch/x86_64/kernel/acpi/Makefile	2007-03-14 10:55:21.000000000 +0100
+@@ -6,4 +6,3 @@
+ obj-y			+= processor.o
  processor-y		:= ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
  endif
- 
-+boot-$(CONFIG_XEN)		:= ../../../i386/kernel/acpi/boot-xen.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/apic-xen.c linux-2.6.18-xen/arch/x86_64/kernel/apic-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/apic-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/apic-xen.c	2006-11-19 14:26:33.000000000 +0100
-@@ -0,0 +1,197 @@
+-
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/apic-xen.c b/arch/x86_64/kernel/apic-xen.c
+--- a/arch/x86_64/kernel/apic-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/apic-xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,254 @@
 +/*
 + *	Local APIC handling, local APIC timers
 + *
@@ -29296,16 +25249,21 @@
 +#include <linux/kernel_stat.h>
 +#include <linux/sysdev.h>
 +#include <linux/module.h>
++#include <linux/ioport.h>
 +
 +#include <asm/atomic.h>
 +#include <asm/smp.h>
 +#include <asm/mtrr.h>
 +#include <asm/mpspec.h>
-+#include <asm/desc.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/hpet.h>
++#include <asm/pgalloc.h>
++#include <asm/mach_apic.h>
++#include <asm/nmi.h>
 +#include <asm/idle.h>
++#include <asm/proto.h>
++#include <asm/timex.h>
++#include <asm/apic.h>
 +
++int apic_mapped;
 +int apic_verbosity;
 +
 +/*
@@ -29333,19 +25291,21 @@
 +	return -EINVAL;
 +}
 +
-+void smp_local_timer_interrupt(struct pt_regs *regs)
++void smp_local_timer_interrupt(void)
 +{
-+	profile_tick(CPU_PROFILING, regs);
++	profile_tick(CPU_PROFILING);
 +#ifndef CONFIG_XEN
 +#ifdef CONFIG_SMP
-+		update_process_times(user_mode(regs));
++	update_process_times(user_mode(get_irq_regs()));
 +#endif
++	if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id)
++		main_timer_handler();
 +#endif
 +	/*
 +	 * We take the 'long' return path, and there every subsystem
 +	 * grabs the appropriate locks (kernel lock/ irq lock).
 +	 *
-+	 * we might want to decouple profiling from the 'long path',
++	 * We might want to decouple profiling from the 'long path',
 +	 * and do the profiling totally in assembly.
 +	 *
 +	 * Currently this isn't too much of an issue (performance wise),
@@ -29363,6 +25323,8 @@
 + */
 +void smp_apic_timer_interrupt(struct pt_regs *regs)
 +{
++	struct pt_regs *old_regs = set_irq_regs(regs);
++
 +	/*
 +	 * the NMI deadlock-detector uses this.
 +	 */
@@ -29380,8 +25342,9 @@
 +	 */
 +	exit_idle();
 +	irq_enter();
-+	smp_local_timer_interrupt(regs);
++	smp_local_timer_interrupt();
 +	irq_exit();
++	set_irq_regs(old_regs);
 +}
 +
 +/*
@@ -29450,7 +25413,7 @@
 +	irq_exit();
 +}
 +
-+int disable_apic;
++int disable_apic; 
 +
 +/*
 + * This initializes the IO-APIC and APIC hardware if this is
@@ -29458,17 +25421,64 @@
 + */
 +int __init APIC_init_uniprocessor (void)
 +{
-+#ifdef CONFIG_X86_IO_APIC
-+	if (smp_found_config)
-+		if (!skip_ioapic_setup && nr_ioapics)
-+			setup_IO_APIC();
-+#endif
++	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
++		setup_IO_APIC();
++	return 0;
++}
++
++#ifndef CONFIG_XEN
++static __init int setup_disableapic(char *str) 
++{ 
++	disable_apic = 1;
++	clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
++	return 0;
++}
++early_param("disableapic", setup_disableapic);
++
++/* same as disableapic, for compatibility */
++static __init int setup_nolapic(char *str) 
++{ 
++	return setup_disableapic(str);
++} 
++early_param("nolapic", setup_nolapic);
++
++static __init int setup_noapictimer(char *str) 
++{ 
++	if (str[0] != ' ' && str[0] != 0)
++		return 0;
++	disable_apic_timer = 1;
++	return 1;
++} 
++
++static __init int setup_apicmaintimer(char *str)
++{
++	apic_runs_main_timer = 1;
++	nohpet = 1;
++	return 1;
++}
++__setup("apicmaintimer", setup_apicmaintimer);
 +
++static __init int setup_noapicmaintimer(char *str)
++{
++	apic_runs_main_timer = -1;
 +	return 1;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/asm-offsets.c linux-2.6.18-xen/arch/x86_64/kernel/asm-offsets.c
---- linux-2.6.18.3/arch/x86_64/kernel/asm-offsets.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/x86_64/kernel/asm-offsets.c	2006-11-19 14:26:33.000000000 +0100
++__setup("noapicmaintimer", setup_noapicmaintimer);
++
++static __init int setup_apicpmtimer(char *s)
++{
++	apic_calibrate_pmtmr = 1;
++	notsc_setup(NULL);
++	return setup_apicmaintimer(NULL);
++}
++__setup("apicpmtimer", setup_apicpmtimer);
++
++__setup("noapictimer", setup_noapictimer); 
++
++#endif
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/asm-offsets.c b/arch/x86_64/kernel/asm-offsets.c
+--- a/arch/x86_64/kernel/asm-offsets.c	2007-03-12 21:58:06.000000000 +0100
++++ b/arch/x86_64/kernel/asm-offsets.c	2007-03-14 10:55:21.000000000 +0100
 @@ -67,8 +67,10 @@
  	DEFINE(pbe_address, offsetof(struct pbe, address));
  	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
@@ -29480,10 +25490,10 @@
  	BLANK();
  	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
  	return 0;
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/e820-xen.c linux-2.6.18-xen/arch/x86_64/kernel/e820-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/e820-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/e820-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,757 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/e820-xen.c b/arch/x86_64/kernel/e820-xen.c
+--- a/arch/x86_64/kernel/e820-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/e820-xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,758 @@
 +/* 
 + * Handle the memory map.
 + * The functions here do the job until bootmem takes over.
@@ -29502,6 +25512,7 @@
 +#include <linux/string.h>
 +#include <linux/kexec.h>
 +#include <linux/module.h>
++#include <linux/mm.h>
 +
 +#include <asm/pgtable.h>
 +#include <asm/page.h>
@@ -29511,6 +25522,8 @@
 +#include <asm/sections.h>
 +#include <xen/interface/memory.h>
 +
++struct e820map e820 __initdata;
++
 +/* 
 + * PFN of last memory page.
 + */
@@ -29527,7 +25540,7 @@
 +/* 
 + * Last pfn which the user wants to use.
 + */
-+unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;  
++static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT;
 +
 +extern struct resource code_resource, data_resource;
 +
@@ -29539,13 +25552,13 @@
 +#ifndef CONFIG_XEN
 +	/* various gunk below that needed for SMP startup */
 +	if (addr < 0x8000) { 
-+		*addrp = 0x8000;
++		*addrp = PAGE_ALIGN(0x8000);
 +		return 1; 
 +	}
 +
 +	/* direct mapping tables of the kernel */
 +	if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) { 
-+		*addrp = table_end << PAGE_SHIFT; 
++		*addrp = PAGE_ALIGN(table_end << PAGE_SHIFT);
 +		return 1;
 +	} 
 +
@@ -29553,23 +25566,18 @@
 +#ifdef CONFIG_BLK_DEV_INITRD
 +	if (LOADER_TYPE && INITRD_START && last >= INITRD_START && 
 +	    addr < INITRD_START+INITRD_SIZE) { 
-+		*addrp = INITRD_START + INITRD_SIZE; 
++		*addrp = PAGE_ALIGN(INITRD_START + INITRD_SIZE);
 +		return 1;
 +	} 
 +#endif
-+	/* kernel code + 640k memory hole (later should not be needed, but 
-+	   be paranoid for now) */
-+	if (last >= 640*1024 && addr < 1024*1024) {
-+		*addrp = 1024*1024;
-+		return 1;
-+	}
-+	if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
-+		*addrp = __pa_symbol(&_end);
++	/* kernel code */
++	if (last >= __pa_symbol(&_text) && addr < __pa_symbol(&_end)) {
++		*addrp = PAGE_ALIGN(__pa_symbol(&_end));
 +		return 1;
 +	}
 +
 +	if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
-+		*addrp = ebda_addr + ebda_size;
++		*addrp = PAGE_ALIGN(ebda_addr + ebda_size);
 +		return 1;
 +	}
 +
@@ -29648,7 +25656,7 @@
 +			continue; 
 +		while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
 +			;
-+		last = addr + size;
++		last = PAGE_ALIGN(addr) + size;
 +		if (last > ei->addr + ei->size)
 +			continue;
 +		if (last > end) 
@@ -29658,59 +25666,14 @@
 +	return -1UL;		
 +} 
 +
-+/* 
-+ * Free bootmem based on the e820 table for a node.
-+ */
-+void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
-+{
-+	int i;
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct e820entry *ei = &e820.map[i]; 
-+		unsigned long last, addr;
-+
-+		if (ei->type != E820_RAM || 
-+		    ei->addr+ei->size <= start || 
-+		    ei->addr >= end)
-+			continue;
-+
-+		addr = round_up(ei->addr, PAGE_SIZE);
-+		if (addr < start) 
-+			addr = start;
-+
-+		last = round_down(ei->addr + ei->size, PAGE_SIZE); 
-+		if (last >= end)
-+			last = end; 
-+
-+		if (last > addr && last-addr >= PAGE_SIZE)
-+			free_bootmem_node(pgdat, addr, last-addr);
-+	}
-+}
-+
 +/*
 + * Find the highest page frame number we have available
 + */
 +unsigned long __init e820_end_of_ram(void)
 +{
-+	int i;
 +	unsigned long end_pfn = 0;
++	end_pfn = find_max_pfn_with_active_regions();
 +	
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct e820entry *ei = &e820.map[i]; 
-+		unsigned long start, end;
-+
-+		start = round_up(ei->addr, PAGE_SIZE); 
-+		end = round_down(ei->addr + ei->size, PAGE_SIZE); 
-+		if (start >= end)
-+			continue;
-+		if (ei->type == E820_RAM) { 
-+		if (end > end_pfn<<PAGE_SHIFT)
-+			end_pfn = end>>PAGE_SHIFT;
-+		} else { 
-+			if (end > end_pfn_map<<PAGE_SHIFT) 
-+				end_pfn_map = end>>PAGE_SHIFT;
-+		} 
-+	}
-+
 +	if (end_pfn > end_pfn_map) 
 +		end_pfn_map = end_pfn;
 +	if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
@@ -29720,43 +25683,10 @@
 +	if (end_pfn > end_pfn_map) 
 +		end_pfn = end_pfn_map; 
 +
++	printk("end_pfn_map = %lu\n", end_pfn_map);
 +	return end_pfn;	
 +}
 +
-+/* 
-+ * Compute how much memory is missing in a range.
-+ * Unlike the other functions in this file the arguments are in page numbers.
-+ */
-+unsigned long __init
-+e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
-+{
-+	unsigned long ram = 0;
-+	unsigned long start = start_pfn << PAGE_SHIFT;
-+	unsigned long end = end_pfn << PAGE_SHIFT;
-+	int i;
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct e820entry *ei = &e820.map[i];
-+		unsigned long last, addr;
-+
-+		if (ei->type != E820_RAM ||
-+		    ei->addr+ei->size <= start ||
-+		    ei->addr >= end)
-+			continue;
-+
-+		addr = round_up(ei->addr, PAGE_SIZE);
-+		if (addr < start)
-+			addr = start;
-+
-+		last = round_down(ei->addr + ei->size, PAGE_SIZE);
-+		if (last >= end)
-+			last = end;
-+
-+		if (last > addr)
-+			ram += last - addr;
-+	}
-+	return ((end - start) - ram) >> PAGE_SHIFT;
-+}
-+
 +/*
 + * Mark e820 reserved areas as busy for the resource manager.
 + */
@@ -29793,6 +25723,96 @@
 +	}
 +}
 +
++/* Mark pages corresponding to given address range as nosave */
++static void __init
++e820_mark_nosave_range(unsigned long start, unsigned long end)
++{
++	unsigned long pfn, max_pfn;
++
++	if (start >= end)
++		return;
++
++	printk("Nosave address range: %016lx - %016lx\n", start, end);
++	max_pfn = end >> PAGE_SHIFT;
++	for (pfn = start >> PAGE_SHIFT; pfn < max_pfn; pfn++)
++		if (pfn_valid(pfn))
++			SetPageNosave(pfn_to_page(pfn));
++}
++
++/*
++ * Find the ranges of physical addresses that do not correspond to
++ * e820 RAM areas and mark the corresponding pages as nosave for software
++ * suspend and suspend to RAM.
++ *
++ * This function requires the e820 map to be sorted and without any
++ * overlapping entries and assumes the first e820 area to be RAM.
++ */
++void __init e820_mark_nosave_regions(void)
++{
++	int i;
++	unsigned long paddr;
++
++	paddr = round_down(e820.map[0].addr + e820.map[0].size, PAGE_SIZE);
++	for (i = 1; i < e820.nr_map; i++) {
++		struct e820entry *ei = &e820.map[i];
++
++		if (paddr < ei->addr)
++			e820_mark_nosave_range(paddr,
++					round_up(ei->addr, PAGE_SIZE));
++
++		paddr = round_down(ei->addr + ei->size, PAGE_SIZE);
++		if (ei->type != E820_RAM)
++			e820_mark_nosave_range(round_up(ei->addr, PAGE_SIZE),
++					paddr);
++
++		if (paddr >= (end_pfn << PAGE_SHIFT))
++			break;
++	}
++}
++
++/* Walk the e820 map and register active regions within a node */
++void __init
++e820_register_active_regions(int nid, unsigned long start_pfn,
++							unsigned long end_pfn)
++{
++	int i;
++	unsigned long ei_startpfn, ei_endpfn;
++	for (i = 0; i < e820.nr_map; i++) {
++		struct e820entry *ei = &e820.map[i];
++		ei_startpfn = round_up(ei->addr, PAGE_SIZE) >> PAGE_SHIFT;
++		ei_endpfn = round_down(ei->addr + ei->size, PAGE_SIZE)
++								>> PAGE_SHIFT;
++
++		/* Skip map entries smaller than a page */
++		if (ei_startpfn >= ei_endpfn)
++			continue;
++
++		/* Check if end_pfn_map should be updated */
++		if (ei->type != E820_RAM && ei_endpfn > end_pfn_map)
++			end_pfn_map = ei_endpfn;
++
++		/* Skip if map is outside the node */
++		if (ei->type != E820_RAM ||
++				ei_endpfn <= start_pfn ||
++				ei_startpfn >= end_pfn)
++			continue;
++
++		/* Check for overlaps */
++		if (ei_startpfn < start_pfn)
++			ei_startpfn = start_pfn;
++		if (ei_endpfn > end_pfn)
++			ei_endpfn = end_pfn;
++
++		/* Obey end_user_pfn to save on memmap */
++		if (ei_startpfn >= end_user_pfn)
++			continue;
++		if (ei_endpfn > end_user_pfn)
++			ei_endpfn = end_user_pfn;
++
++		add_active_range(nid, ei_startpfn, ei_endpfn);
++	}
++}
++
 +/* 
 + * Add a memory region to the kernel e820 map.
 + */ 
@@ -30013,13 +26033,6 @@
 + * If we're lucky and live on a modern system, the setup code
 + * will have given us a memory map that we can use to properly
 + * set up memory.  If we aren't, we'll fake a memory map.
-+ *
-+ * We check to see that the memory map contains at least 2 elements
-+ * before we'll use it, because the detection code in setup.S may
-+ * not be perfect and most every PC known to man has two memory
-+ * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
-+ * thinkpad 560x, for example, does not cooperate with the memory
-+ * detection code.)
 + */
 +static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
 +{
@@ -30041,37 +26054,20 @@
 +		if (start > end)
 +			return -1;
 +
-+#ifndef CONFIG_XEN
-+		/*
-+		 * Some BIOSes claim RAM in the 640k - 1M region.
-+		 * Not right. Fix it up.
-+		 * 
-+		 * This should be removed on Hammer which is supposed to not
-+		 * have non e820 covered ISA mappings there, but I had some strange
-+		 * problems so it stays for now.  -AK
-+		 */
-+		if (type == E820_RAM) {
-+			if (start < 0x100000ULL && end > 0xA0000ULL) {
-+				if (start < 0xA0000ULL)
-+					add_memory_region(start, 0xA0000ULL-start, type);
-+				if (end <= 0x100000ULL)
-+					continue;
-+				start = 0x100000ULL;
-+				size = end - start;
-+			}
-+		}
-+#endif
-+
 +		add_memory_region(start, size, type);
 +	} while (biosmap++,--nr_map);
 +	return 0;
 +}
 +
-+#ifndef CONFIG_XEN
-+void __init setup_memory_region(void)
++void early_panic(char *msg)
 +{
-+	char *who = "BIOS-e820";
++	early_printk(msg);
++	panic(msg);
++}
 +
++void __init setup_memory_region(void)
++{
++#ifndef CONFIG_XEN
 +	/*
 +	 * Try to copy the BIOS-supplied E820-map.
 +	 *
@@ -30079,30 +26075,11 @@
 +	 * the next section from 1mb->appropriate_mem_k
 +	 */
 +	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
-+	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
-+		unsigned long mem_size;
-+
-+		/* compare results from other methods and take the greater */
-+		if (ALT_MEM_K < EXT_MEM_K) {
-+			mem_size = EXT_MEM_K;
-+			who = "BIOS-88";
-+		} else {
-+			mem_size = ALT_MEM_K;
-+			who = "BIOS-e801";
-+		}
-+
-+		e820.nr_map = 0;
-+		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
-+		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
-+  	}
++	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0)
++		early_panic("Cannot find a valid memory map");
 +	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-+	e820_print_map(who);
-+}
-+
++	e820_print_map("BIOS-e820");
 +#else  /* CONFIG_XEN */
-+
-+void __init setup_memory_region(void)
-+{
 +	int rc;
 +	struct xen_memory_map memmap;
 +	/*
@@ -30127,21 +26104,22 @@
 +	BUG_ON(rc);
 +
 +	sanitize_e820_map(map, (char *)&memmap.nr_entries);
-+
-+	BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
-+
++	if (copy_e820_map(map, (char)memmap.nr_entries) < 0)
++		early_panic("Cannot find a valid memory map");
 +	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
 +	e820_print_map("Xen");
-+}
 +#endif
++}
 +
-+void __init parse_memopt(char *p, char **from) 
++static int __init parse_memopt(char *p)
 +{ 
 +	int i;
 +	unsigned long current_end;
 +	unsigned long end;
 +
-+	end_user_pfn = memparse(p, from);
++	if (!p)
++		return -EINVAL;
++	end_user_pfn = memparse(p, &p);
 +	end_user_pfn >>= PAGE_SHIFT;	
 +
 +	end = end_user_pfn<<PAGE_SHIFT;
@@ -30158,27 +26136,60 @@
 +		else
 +			add_memory_region(current_end, end - current_end, E820_RAM);
 +	}
++	return 0;
 +} 
++early_param("mem", parse_memopt);
++
++static int userdef __initdata;
 +
-+void __init parse_memmapopt(char *p, char **from)
++static int __init parse_memmap_opt(char *p)
 +{
++	char *oldp;
 +	unsigned long long start_at, mem_size;
 +
-+	mem_size = memparse(p, from);
-+	p = *from;
++	if (!strcmp(p, "exactmap")) {
++#ifdef CONFIG_CRASH_DUMP
++		/* If we are doing a crash dump, we
++		 * still need to know the real mem
++		 * size before original memory map is
++		 * reset.
++		 */
++		e820_register_active_regions(0, 0, -1UL);
++		saved_max_pfn = e820_end_of_ram();
++		remove_all_active_ranges();
++#endif
++		end_pfn_map = 0;
++		e820.nr_map = 0;
++		userdef = 1;
++		return 0;
++	}
++
++	oldp = p;
++	mem_size = memparse(p, &p);
++	if (p == oldp)
++		return -EINVAL;
 +	if (*p == '@') {
-+		start_at = memparse(p+1, from);
++		start_at = memparse(p+1, &p);
 +		add_memory_region(start_at, mem_size, E820_RAM);
 +	} else if (*p == '#') {
-+		start_at = memparse(p+1, from);
++		start_at = memparse(p+1, &p);
 +		add_memory_region(start_at, mem_size, E820_ACPI);
 +	} else if (*p == '$') {
-+		start_at = memparse(p+1, from);
++		start_at = memparse(p+1, &p);
 +		add_memory_region(start_at, mem_size, E820_RESERVED);
 +	} else {
 +		end_user_pfn = (mem_size >> PAGE_SHIFT);
 +	}
-+	p = *from;
++	return *p == '\0' ? 0 : -EINVAL;
++}
++early_param("memmap", parse_memmap_opt);
++
++void finish_e820_parsing(void)
++{
++	if (userdef) {
++		printk(KERN_INFO "user-defined physical RAM map:\n");
++		e820_print_map("user");
++	}
 +}
 +
 +unsigned long pci_mem_start = 0xaeedbabe;
@@ -30241,163 +26252,21 @@
 +	printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
 +		pci_mem_start, gapstart, gapsize);
 +}
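
The hunks above fold the old two-argument parsers into early_param() handlers: parse_memopt() caps usable memory for mem=, and parse_memmap_opt() accepts exactmap plus size@start (RAM), size#start (ACPI) and size$start (reserved), returning -EINVAL on trailing junk. A minimal user-space sketch of that grammar, assuming a simplified stand-in for the kernel's memparse():

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for the kernel's memparse(): parse a number
     * with an optional K/M/G suffix and advance *retptr past it. */
    static unsigned long long memparse(const char *ptr, char **retptr)
    {
            unsigned long long ret = strtoull(ptr, retptr, 0);

            switch (**retptr) {
            case 'G': case 'g':
                    ret <<= 10;
                    /* fall through */
            case 'M': case 'm':
                    ret <<= 10;
                    /* fall through */
            case 'K': case 'k':
                    ret <<= 10;
                    (*retptr)++;
            }
            return ret;
    }

    /* Mirror of parse_memmap_opt()'s grammar: size@start, size#start,
     * size$start, or a bare size that caps usable memory. */
    static int parse_memmap(char *p)
    {
            char *oldp = p;
            unsigned long long start, size = memparse(p, &p);

            if (p == oldp)
                    return -1;
            switch (*p) {
            case '@':
                    start = memparse(p + 1, &p);
                    printf("RAM      %#llx-%#llx\n", start, start + size);
                    break;
            case '#':
                    start = memparse(p + 1, &p);
                    printf("ACPI     %#llx-%#llx\n", start, start + size);
                    break;
            case '$':
                    start = memparse(p + 1, &p);
                    printf("reserved %#llx-%#llx\n", start, start + size);
                    break;
            default:
                    printf("limit    %#llx\n", size);
                    break;
            }
            return *p == '\0' ? 0 : -1;
    }

    int main(void)
    {
            char a[] = "64M@16M", b[] = "256K$0xa0000", c[] = "512M";

            parse_memmap(a);
            parse_memmap(b);
            parse_memmap(c);
            return 0;
    }

The kernel helper advances the cursor the same way, which is why the new handlers can chain memparse() calls through &p instead of the old char **from plumbing.
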
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/early_printk-xen.c linux-2.6.18-xen/arch/x86_64/kernel/early_printk-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/early_printk-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/early_printk-xen.c	2006-11-19 14:26:33.000000000 +0100
-@@ -0,0 +1,304 @@
-+
-+#include <linux/console.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/string.h>
-+#include <linux/screen_info.h>
-+#include <asm/io.h>
-+#include <asm/processor.h>
-+#include <asm/fcntl.h>
-+
-+/* Simple VGA output */
-+
-+#ifdef __i386__
-+#include <asm/setup.h>
-+#define VGABASE		(__ISA_IO_base + 0xb8000)
-+#else
-+#include <asm/bootsetup.h>
-+#define VGABASE		((void __iomem *)0xffffffff800b8000UL)
-+#endif
-+
-+static int max_ypos = 25, max_xpos = 80;
-+static int current_ypos = 25, current_xpos = 0;
-+
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
+--- a/arch/x86_64/kernel/early_printk.c	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/kernel/early_printk.c	2007-03-14 10:55:21.000000000 +0100
+@@ -20,6 +20,7 @@
+ static int max_ypos = 25, max_xpos = 80;
+ static int current_ypos = 25, current_xpos = 0;
+ 
 +#ifndef CONFIG_XEN
-+static void early_vga_write(struct console *con, const char *str, unsigned n)
-+{
-+	char c;
-+	int  i, k, j;
-+
-+	while ((c = *str++) != '\0' && n-- > 0) {
-+		if (current_ypos >= max_ypos) {
-+			/* scroll 1 line up */
-+			for (k = 1, j = 0; k < max_ypos; k++, j++) {
-+				for (i = 0; i < max_xpos; i++) {
-+					writew(readw(VGABASE+2*(max_xpos*k+i)),
-+					       VGABASE + 2*(max_xpos*j + i));
-+				}
-+			}
-+			for (i = 0; i < max_xpos; i++)
-+				writew(0x720, VGABASE + 2*(max_xpos*j + i));
-+			current_ypos = max_ypos-1;
-+		}
-+		if (c == '\n') {
-+			current_xpos = 0;
-+			current_ypos++;
-+		} else if (c != '\r')  {
-+			writew(((0x7 << 8) | (unsigned short) c),
-+			       VGABASE + 2*(max_xpos*current_ypos +
-+						current_xpos++));
-+			if (current_xpos >= max_xpos) {
-+				current_xpos = 0;
-+				current_ypos++;
-+			}
-+		}
-+	}
-+}
-+
-+static struct console early_vga_console = {
-+	.name =		"earlyvga",
-+	.write =	early_vga_write,
-+	.flags =	CON_PRINTBUFFER,
-+	.index =	-1,
-+};
-+
-+/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
-+
-+static int early_serial_base = 0x3f8;  /* ttyS0 */
-+
-+#define XMTRDY          0x20
-+
-+#define DLAB		0x80
-+
-+#define TXR             0       /*  Transmit register (WRITE) */
-+#define RXR             0       /*  Receive register  (READ)  */
-+#define IER             1       /*  Interrupt Enable          */
-+#define IIR             2       /*  Interrupt ID              */
-+#define FCR             2       /*  FIFO control              */
-+#define LCR             3       /*  Line control              */
-+#define MCR             4       /*  Modem control             */
-+#define LSR             5       /*  Line Status               */
-+#define MSR             6       /*  Modem Status              */
-+#define DLL             0       /*  Divisor Latch Low         */
-+#define DLH             1       /*  Divisor latch High        */
-+
-+static int early_serial_putc(unsigned char ch)
-+{
-+	unsigned timeout = 0xffff;
-+	while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
-+		cpu_relax();
-+	outb(ch, early_serial_base + TXR);
-+	return timeout ? 0 : -1;
-+}
-+
-+static void early_serial_write(struct console *con, const char *s, unsigned n)
-+{
-+	while (*s && n-- > 0) {
-+		early_serial_putc(*s);
-+		if (*s == '\n')
-+			early_serial_putc('\r');
-+		s++;
-+	}
-+}
-+
-+#define DEFAULT_BAUD 9600
-+
-+static __init void early_serial_init(char *s)
-+{
-+	unsigned char c;
-+	unsigned divisor;
-+	unsigned baud = DEFAULT_BAUD;
-+	char *e;
-+
-+	if (*s == ',')
-+		++s;
-+
-+	if (*s) {
-+		unsigned port;
-+		if (!strncmp(s,"0x",2)) {
-+			early_serial_base = simple_strtoul(s, &e, 16);
-+		} else {
-+			static int bases[] = { 0x3f8, 0x2f8 };
-+
-+			if (!strncmp(s,"ttyS",4))
-+				s += 4;
-+			port = simple_strtoul(s, &e, 10);
-+			if (port > 1 || s == e)
-+				port = 0;
-+			early_serial_base = bases[port];
-+		}
-+		s += strcspn(s, ",");
-+		if (*s == ',')
-+			s++;
-+	}
-+
-+	outb(0x3, early_serial_base + LCR);	/* 8n1 */
-+	outb(0, early_serial_base + IER);	/* no interrupt */
-+	outb(0, early_serial_base + FCR);	/* no fifo */
-+	outb(0x3, early_serial_base + MCR);	/* DTR + RTS */
-+
-+	if (*s) {
-+		baud = simple_strtoul(s, &e, 0);
-+		if (baud == 0 || s == e)
-+			baud = DEFAULT_BAUD;
-+	}
-+
-+	divisor = 115200 / baud;
-+	c = inb(early_serial_base + LCR);
-+	outb(c | DLAB, early_serial_base + LCR);
-+	outb(divisor & 0xff, early_serial_base + DLL);
-+	outb((divisor >> 8) & 0xff, early_serial_base + DLH);
-+	outb(c & ~DLAB, early_serial_base + LCR);
-+}
-+
+ static void early_vga_write(struct console *con, const char *str, unsigned n)
+ {
+ 	char c;
+@@ -149,6 +150,39 @@
+ 	outb(c & ~DLAB, early_serial_base + LCR);
+ }
+ 
 +#else /* CONFIG_XEN */
 +
 +#undef SCREEN_INFO
@@ -30431,128 +26300,13 @@
 +
 +#endif
 +
-+static struct console early_serial_console = {
-+	.name =		"earlyser",
-+	.write =	early_serial_write,
-+	.flags =	CON_PRINTBUFFER,
-+	.index =	-1,
-+};
-+
-+/* Console interface to a host file on AMD's SimNow! */
-+
-+static int simnow_fd;
-+
-+enum {
-+	MAGIC1 = 0xBACCD00A,
-+	MAGIC2 = 0xCA110000,
-+	XOPEN = 5,
-+	XWRITE = 4,
-+};
-+
-+static noinline long simnow(long cmd, long a, long b, long c)
-+{
-+	long ret;
-+	asm volatile("cpuid" :
-+		     "=a" (ret) :
-+		     "b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
-+	return ret;
-+}
-+
-+void __init simnow_init(char *str)
-+{
-+	char *fn = "klog";
-+	if (*str == '=')
-+		fn = ++str;
-+	/* error ignored */
-+	simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644);
-+}
-+
-+static void simnow_write(struct console *con, const char *s, unsigned n)
-+{
-+	simnow(XWRITE, simnow_fd, (unsigned long)s, n);
-+}
-+
-+static struct console simnow_console = {
-+	.name =		"simnow",
-+	.write =	simnow_write,
-+	.flags =	CON_PRINTBUFFER,
-+	.index =	-1,
-+};
-+
-+/* Direct interface for emergencies */
-+struct console *early_console = &early_vga_console;
-+static int early_console_initialized = 0;
-+
-+void early_printk(const char *fmt, ...)
-+{
-+	char buf[512];
-+	int n;
-+	va_list ap;
-+
-+	va_start(ap,fmt);
-+	n = vscnprintf(buf,512,fmt,ap);
-+	early_console->write(early_console,buf,n);
-+	va_end(ap);
-+}
-+
-+static int __initdata keep_early;
-+
-+int __init setup_early_printk(char *opt)
-+{
-+	char *space;
-+	char buf[256];
-+
-+	if (early_console_initialized)
-+		return 1;
-+
-+	strlcpy(buf,opt,sizeof(buf));
-+	space = strchr(buf, ' ');
-+	if (space)
-+		*space = 0;
-+
-+	if (strstr(buf,"keep"))
-+		keep_early = 1;
-+
-+	if (!strncmp(buf, "serial", 6)) {
-+		early_serial_init(buf + 6);
-+		early_console = &early_serial_console;
-+	} else if (!strncmp(buf, "ttyS", 4)) {
-+		early_serial_init(buf);
-+		early_console = &early_serial_console;
-+	} else if (!strncmp(buf, "vga", 3)
-+	           && SCREEN_INFO.orig_video_isVGA == 1) {
-+		max_xpos = SCREEN_INFO.orig_video_cols;
-+		max_ypos = SCREEN_INFO.orig_video_lines;
-+		current_ypos = SCREEN_INFO.orig_y;
-+		early_console = &early_vga_console;
-+ 	} else if (!strncmp(buf, "simnow", 6)) {
-+ 		simnow_init(buf + 6);
-+ 		early_console = &simnow_console;
-+ 		keep_early = 1;
-+	}
-+	early_console_initialized = 1;
-+	register_console(early_console);
-+	return 0;
-+}
-+
-+void __init disable_early_printk(void)
-+{
-+	if (!early_console_initialized || !early_console)
-+		return;
-+	if (!keep_early) {
-+		printk("disabling early console\n");
-+		unregister_console(early_console);
-+		early_console_initialized = 0;
-+	} else {
-+		printk("keeping early console\n");
-+	}
-+}
-+
-+__setup("earlyprintk=", setup_early_printk);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/entry-xen.S linux-2.6.18-xen/arch/x86_64/kernel/entry-xen.S
---- linux-2.6.18.3/arch/x86_64/kernel/entry-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/entry-xen.S	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,1213 @@
+ static struct console early_serial_console = {
+ 	.name =		"earlyser",
+ 	.write =	early_serial_write,
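
The early_printk.c hunk above now patches the stock file instead of carrying a full early_printk-xen.c copy, inserting a CONFIG_XEN branch between the serial setup and the console structs. The shape of the mechanism, as a hedged user-space sketch (names illustrative, stderr standing in for the VGA/serial/Xen write paths):

    #include <stdarg.h>
    #include <stdio.h>

    /* One write hook behind a struct, and early_printk() formatting
     * into a fixed buffer before handing the bytes to that hook. */
    struct early_console {
            const char *name;
            void (*write)(const char *s, unsigned n);
    };

    static void stderr_write(const char *s, unsigned n)
    {
            fwrite(s, 1, n, stderr);
    }

    static struct early_console console = { "demo", stderr_write };

    static void early_printk(const char *fmt, ...)
    {
            char buf[512];
            int n;
            va_list ap;

            va_start(ap, fmt);
            n = vsnprintf(buf, sizeof(buf), fmt, ap);
            va_end(ap);
            if (n > (int)sizeof(buf) - 1)
                    n = sizeof(buf) - 1;    /* vsnprintf reports wanted length */
            if (n > 0)
                    console.write(buf, (unsigned)n);
    }

    int main(void)
    {
            early_printk("early console up on %s\n", console.name);
            return 0;
    }

Swapping the write pointer is all it takes to retarget the output, which is why the Xen branch only has to replace SCREEN_INFO and the backend I/O, not the formatting path.
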
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/entry-xen.S b/arch/x86_64/kernel/entry-xen.S
+--- a/arch/x86_64/kernel/entry-xen.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/entry-xen.S	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,1160 @@
 +/*
 + *  linux/arch/x86_64/entry.S
 + *
@@ -30560,8 +26314,6 @@
 + *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 + *  Copyright (C) 2000  Pavel Machek <pavel at suse.cz>
 + * 
-+ *  $Id$
-+ *
 + *  Jun Nakajima <jun.nakajima at intel.com>
 + *  Asit Mallick <asit.k.mallick at intel.com>
 + *      Modified for Xen
@@ -30581,18 +26333,25 @@
 + * at the top of the kernel process stack.	
 + * - partial stack frame: partially saved registers upto R11.
 + * - full stack frame: Like partial stack frame, but all register saved. 
-+ *	
-+ * TODO:	 
-+ * - schedule it carefully for the final hardware.
++ *
++ * Some macro usage:
++ * - CFI macros are used to generate dwarf2 unwind information for better
++ * backtraces. They don't change any code.
++ * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
++ * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
++ * There are unfortunately lots of special cases where some registers
++ * are not touched. The macro is a big mess that should be cleaned up.
++ * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
++ * Gives a full stack frame.
++ * - ENTRY/END - Define functions in the symbol table.
++ * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
++ * frame that is otherwise undefined after a SYSCALL
++ * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
++ * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 + */
 +
-+#define ASSEMBLY 1
-+#ifdef CONFIG_DEBUG_INFO
-+#undef CONFIG_DEBUG_INFO
-+#endif
 +#include <linux/linkage.h>
 +#include <asm/segment.h>
-+#include <asm/smp.h>
 +#include <asm/cache.h>
 +#include <asm/errno.h>
 +#include <asm/dwarf2.h>
@@ -30611,7 +26370,7 @@
 +#include "irq_vectors.h"
 +
 +#include "xen_entry.S"
-+	
++
 +	.code64
 +
 +#ifndef CONFIG_PREEMPT
@@ -30629,7 +26388,7 @@
 +.endm
 +
 +NMI_MASK = 0x80000000
-+	
++
 +/*
 + * C code is not supposed to know about undefined top of stack. Every time 
 + * a C function with an pt_regs argument is called from the SYSCALL based 
@@ -30677,6 +26436,7 @@
 +	.macro	CFI_DEFAULT_STACK start=1
 +	.if \start
 +	CFI_STARTPROC	simple
++	CFI_SIGNAL_FRAME
 +	CFI_DEF_CFA	rsp,SS+8
 +	.else
 +	CFI_DEF_CFA_OFFSET SS+8
@@ -30703,13 +26463,13 @@
 +	/*CFI_REL_OFFSET	ss,SS*/
 +	.endm
 +
-+        /*
-+         * Must be consistent with the definition in arch-x86_64.h:    
-+         *     struct iret_context {
-+         *        u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
-+         *     };
-+         * #define VGCF_IN_SYSCALL (1<<8) 
-+         */
++	/*
++	 * Must be consistent with the definition in arch-x86_64.h:    
++	 *     struct iret_context {
++	 *        u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
++	 *     };
++	 * #define VGCF_IN_SYSCALL (1<<8) 
++	 */
 +	.macro HYPERVISOR_IRET flag
 +	testb $3,1*8(%rsp)
 +	jnz   2f
@@ -30730,11 +26490,11 @@
 +	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
 +	.endm
 +
-+        .macro SWITCH_TO_KERNEL ssoff,adjust=0
++	.macro SWITCH_TO_KERNEL ssoff,adjust=0
 +	jc  1f
 +	orb  $1,\ssoff-\adjust+4(%rsp)
 +1:
-+        .endm
++	.endm
 +
 +/*
 + * A newly forked process directly context switches into this.
@@ -30742,9 +26502,13 @@
 +/* rdi:	prev */	
 +ENTRY(ret_from_fork)
 +	CFI_DEFAULT_STACK
++	push kernel_eflags(%rip)
++	CFI_ADJUST_CFA_OFFSET 4
++	popf				# reset kernel eflags
++	CFI_ADJUST_CFA_OFFSET -4
 +	call schedule_tail
 +	GET_THREAD_INFO(%rcx)
-+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
++	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
 +	jnz rff_trace
 +rff_action:	
 +	RESTORE_REST
@@ -30795,6 +26559,7 @@
 +
 +ENTRY(system_call)
 +	CFI_STARTPROC	simple
++	CFI_SIGNAL_FRAME
 +	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
 +	CFI_REGISTER	rip,rcx
 +	/*CFI_REGISTER	rflags,r11*/
@@ -30804,10 +26569,9 @@
 +	 * No need to follow this irqs off/on section - it's straight
 +	 * and short:
 +	 */
-+        XEN_UNBLOCK_EVENTS(%r11)        
++	XEN_UNBLOCK_EVENTS(%r11)
 +	GET_THREAD_INFO(%rcx)
 +	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
-+	CFI_REMEMBER_STATE
 +	jnz tracesys
 +	cmpq $__NR_syscall_max,%rax
 +	ja badsys
@@ -30818,36 +26582,35 @@
 + * Syscall return path ending with SYSRET (fast path)
 + * Has incomplete stack frame and undefined top of stack. 
 + */		
-+	.globl ret_from_sys_call
 +ret_from_sys_call:
 +	movl $_TIF_ALLWORK_MASK,%edi
 +	/* edi:	flagmask */
 +sysret_check:		
 +	GET_THREAD_INFO(%rcx)
-+        XEN_BLOCK_EVENTS(%rsi)        
++	XEN_BLOCK_EVENTS(%rsi)
 +	TRACE_IRQS_OFF
 +	movl threadinfo_flags(%rcx),%edx
 +	andl %edi,%edx
-+	CFI_REMEMBER_STATE
 +	jnz  sysret_careful 
++	CFI_REMEMBER_STATE
 +	/*
 +	 * sysretq will re-enable interrupts:
 +	 */
 +	TRACE_IRQS_ON
-+        XEN_UNBLOCK_EVENTS(%rsi)                
++	XEN_UNBLOCK_EVENTS(%rsi)
 +	CFI_REGISTER	rip,rcx
 +	RESTORE_ARGS 0,8,0
 +	/*CFI_REGISTER	rflags,r11*/
-+        HYPERVISOR_IRET VGCF_IN_SYSCALL
++	HYPERVISOR_IRET VGCF_IN_SYSCALL
 +
++	CFI_RESTORE_STATE
 +	/* Handle reschedules */
 +	/* edx:	work, edi: workmask */	
 +sysret_careful:
-+	CFI_RESTORE_STATE
 +	bt $TIF_NEED_RESCHED,%edx
 +	jnc sysret_signal
 +	TRACE_IRQS_ON
-+        XEN_UNBLOCK_EVENTS(%rsi)        
++	XEN_UNBLOCK_EVENTS(%rsi)
 +	pushq %rdi
 +	CFI_ADJUST_CFA_OFFSET 8
 +	call schedule
@@ -30858,8 +26621,7 @@
 +	/* Handle a signal */ 
 +sysret_signal:
 +	TRACE_IRQS_ON
-+/*	sti */
-+        XEN_UNBLOCK_EVENTS(%rsi)        
++	XEN_UNBLOCK_EVENTS(%rsi)
 +	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
 +	jz    1f
 +
@@ -30882,7 +26644,6 @@
 +
 +	/* Do syscall tracing */
 +tracesys:			 
-+	CFI_RESTORE_STATE
 +	SAVE_REST
 +	movq $-ENOSYS,RAX(%rsp)
 +	FIXUP_TOP_OF_STACK %rdi
@@ -30891,44 +26652,28 @@
 +	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
 +	RESTORE_REST
 +	cmpq $__NR_syscall_max,%rax
++	movq $-ENOSYS,%rcx
++	cmova %rcx,%rax
 +	ja  1f
 +	movq %r10,%rcx	/* fixup for C */
 +	call *sys_call_table(,%rax,8)
 +1:	movq %rax,RAX-ARGOFFSET(%rsp)
 +	/* Use IRET because user could have changed frame */
-+	jmp int_ret_from_sys_call
-+	CFI_ENDPROC
-+END(system_call)
 +		
 +/* 
 + * Syscall return path ending with IRET.
 + * Has correct top of stack, but partial stack frame.
-+ */ 	
-+ENTRY(int_ret_from_sys_call)
-+	CFI_STARTPROC	simple
-+	CFI_DEF_CFA	rsp,SS+8-ARGOFFSET
-+	/*CFI_REL_OFFSET	ss,SS-ARGOFFSET*/
-+	CFI_REL_OFFSET	rsp,RSP-ARGOFFSET
-+	/*CFI_REL_OFFSET	rflags,EFLAGS-ARGOFFSET*/
-+	/*CFI_REL_OFFSET	cs,CS-ARGOFFSET*/
-+	CFI_REL_OFFSET	rip,RIP-ARGOFFSET
-+	CFI_REL_OFFSET	rdx,RDX-ARGOFFSET
-+	CFI_REL_OFFSET	rcx,RCX-ARGOFFSET
-+	CFI_REL_OFFSET	rax,RAX-ARGOFFSET
-+	CFI_REL_OFFSET	rdi,RDI-ARGOFFSET
-+	CFI_REL_OFFSET	rsi,RSI-ARGOFFSET
-+	CFI_REL_OFFSET	r8,R8-ARGOFFSET
-+	CFI_REL_OFFSET	r9,R9-ARGOFFSET
-+	CFI_REL_OFFSET	r10,R10-ARGOFFSET
-+	CFI_REL_OFFSET	r11,R11-ARGOFFSET
-+        XEN_BLOCK_EVENTS(%rsi)
++ */
++	.globl int_ret_from_sys_call
++int_ret_from_sys_call:
++	XEN_BLOCK_EVENTS(%rsi)
 +	TRACE_IRQS_OFF
 +	testb $3,CS-ARGOFFSET(%rsp)
-+        jnz 1f
-+        /* Need to set the proper %ss (not NULL) for ring 3 iretq */
-+        movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
-+        jmp retint_restore_args   # retrun from ring3 kernel
-+1:              
++	jnz 1f
++	/* Need to set the proper %ss (not NULL) for ring 3 iretq */
++	movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
++	jmp retint_restore_args   # return from ring3 kernel
++1:
 +	movl $_TIF_ALLWORK_MASK,%edi
 +	/* edi:	mask to check */
 +int_with_check:
@@ -30946,8 +26691,7 @@
 +	bt $TIF_NEED_RESCHED,%edx
 +	jnc  int_very_careful
 +	TRACE_IRQS_ON
-+/*	sti */
-+        XEN_UNBLOCK_EVENTS(%rsi)
++	XEN_UNBLOCK_EVENTS(%rsi)
 +	pushq %rdi
 +	CFI_ADJUST_CFA_OFFSET 8
 +	call schedule
@@ -30960,8 +26704,7 @@
 +	/* handle signals and tracing -- both require a full stack frame */
 +int_very_careful:
 +	TRACE_IRQS_ON
-+/*	sti */
-+        XEN_UNBLOCK_EVENTS(%rsi)
++	XEN_UNBLOCK_EVENTS(%rsi)
 +	SAVE_REST
 +	/* Check for syscall exit trace */	
 +	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
@@ -30973,8 +26716,6 @@
 +	popq %rdi
 +	CFI_ADJUST_CFA_OFFSET -8
 +	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
-+	XEN_BLOCK_EVENTS(%rsi)
-+	TRACE_IRQS_OFF
 +	jmp int_restore_rest
 +	
 +int_signal:
@@ -30990,7 +26731,7 @@
 +	TRACE_IRQS_OFF
 +	jmp int_with_check
 +	CFI_ENDPROC
-+END(int_ret_from_sys_call)
++END(system_call)
 +		
 +/* 
 + * Certain special system calls that need to save a complete full stack frame.
@@ -31072,6 +26813,7 @@
 + */
 +	.macro _frame ref
 +	CFI_STARTPROC simple
++	CFI_SIGNAL_FRAME
 +	CFI_DEF_CFA rsp,SS+8-\ref
 +	/*CFI_REL_OFFSET ss,SS-\ref*/
 +	CFI_REL_OFFSET rsp,RSP-\ref
@@ -31115,30 +26857,28 @@
 +	jnc   retint_signal
 +	TRACE_IRQS_ON
 +	XEN_UNBLOCK_EVENTS(%rsi)
-+/*	sti */        
 +	pushq %rdi
 +	CFI_ADJUST_CFA_OFFSET	8
 +	call  schedule
 +	popq %rdi		
 +	CFI_ADJUST_CFA_OFFSET	-8
 +	GET_THREAD_INFO(%rcx)
-+	XEN_BLOCK_EVENTS(%rsi)		
++	XEN_BLOCK_EVENTS(%rsi)
 +	TRACE_IRQS_OFF
-+/*	cli */
 +	jmp retint_check
 +	
 +retint_signal:
 +	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
 +	jz    retint_restore_args
 +	TRACE_IRQS_ON
-+        XEN_UNBLOCK_EVENTS(%rsi)
++	XEN_UNBLOCK_EVENTS(%rsi)
 +	SAVE_REST
 +	movq $-1,ORIG_RAX(%rsp) 			
 +	xorl %esi,%esi		# oldset
 +	movq %rsp,%rdi		# &pt_regs
 +	call do_notify_resume
 +	RESTORE_REST
-+        XEN_BLOCK_EVENTS(%rsi)		
++	XEN_BLOCK_EVENTS(%rsi)
 +	TRACE_IRQS_OFF
 +	movl $_TIF_NEED_RESCHED,%edi
 +	GET_THREAD_INFO(%rcx)
@@ -31147,8 +26887,7 @@
 +#ifdef CONFIG_PREEMPT
 +	/* Returning to kernel space. Check if we need preemption */
 +	/* rcx:	 threadinfo. interrupts off. */
-+	.p2align
-+retint_kernel:	
++ENTRY(retint_kernel)
 +	cmpl $0,threadinfo_preempt_count(%rcx)
 +	jnz  retint_restore_args
 +	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
@@ -31208,7 +26947,6 @@
 +END(call_function_interrupt)
 +#endif
 +
-+#ifdef CONFIG_X86_LOCAL_APIC	
 +ENTRY(apic_timer_interrupt)
 +	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
 +END(apic_timer_interrupt)
@@ -31220,7 +26958,6 @@
 +ENTRY(spurious_interrupt)
 +	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
 +END(spurious_interrupt)
-+#endif
 +#endif /* !CONFIG_XEN */
 +				
 +/*
@@ -31228,9 +26965,9 @@
 + */ 		
 +	.macro zeroentry sym
 +	INTR_FRAME
-+        movq (%rsp),%rcx
-+        movq 8(%rsp),%r11
-+        addq $0x10,%rsp /* skip rcx and r11 */
++	movq (%rsp),%rcx
++	movq 8(%rsp),%r11
++	addq $0x10,%rsp /* skip rcx and r11 */
 +	pushq $0	/* push error code/oldrax */ 
 +	CFI_ADJUST_CFA_OFFSET 8
 +	pushq %rax	/* push real oldrax to the rdi slot */ 
@@ -31242,9 +26979,9 @@
 +
 +	.macro errorentry sym
 +	XCPT_FRAME
-+        movq (%rsp),%rcx
-+        movq 8(%rsp),%r11
-+        addq $0x10,%rsp /* rsp points to the error code */
++	movq (%rsp),%rcx
++	movq 8(%rsp),%r11
++	addq $0x10,%rsp /* rsp points to the error code */
 +	pushq %rax
 +	CFI_ADJUST_CFA_OFFSET 8
 +	leaq  \sym(%rip),%rax
@@ -31256,9 +26993,9 @@
 +	/* error code is on the stack already */
 +	/* handle NMI like exceptions that can happen everywhere */
 +	.macro paranoidentry sym, ist=0
-+        movq (%rsp),%rcx
-+        movq 8(%rsp),%r11
-+        addq $0x10,%rsp /* skip rcx and r11 */        
++	movq (%rsp),%rcx
++	movq 8(%rsp),%r11
++	addq $0x10,%rsp /* skip rcx and r11 */        
 +	SAVE_ALL
 +	cld
 +#if 0 /* not XEN */
@@ -31284,17 +27021,18 @@
 +	.if \ist
 +	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
 +	.endif
-+/*	cli */
-+	TRACE_IRQS_OFF
 +	XEN_BLOCK_EVENTS(%rsi)		
++	.if \irqtrace
++	TRACE_IRQS_OFF
++	.endif
 +	.endm
 +#endif
-+	
++
 +/*
 + * Exception entry point. This expects an error code/orig_rax on the stack
 + * and the exception handler in %rax.	
 + */ 		  				
-+ENTRY(error_entry)
++KPROBE_ENTRY(error_entry)
 +	_frame RDI
 +	/* rdi slot contains rax, oldrax contains error code */
 +	cld	
@@ -31329,20 +27067,19 @@
 +	CFI_REL_OFFSET	r14,R14
 +	movq %r15,(%rsp) 
 +	CFI_REL_OFFSET	r15,R15
-+#if 0        
++#if 0
 +	cmpl $__KERNEL_CS,CS(%rsp)
 +	je  error_kernelspace
-+#endif        
++#endif
 +error_call_handler:
-+	movq %rdi, RDI(%rsp)            
++	movq %rdi,RDI(%rsp) 	
 +	movq %rsp,%rdi
-+	movq ORIG_RAX(%rsp),%rsi	# get error code 
++	movq ORIG_RAX(%rsp),%rsi	/* get error code */ 
 +	movq $-1,ORIG_RAX(%rsp)
 +	call *%rax
 +error_exit:		
 +	RESTORE_REST
-+/*	cli */
-+	XEN_BLOCK_EVENTS(%rsi)		
++	XEN_BLOCK_EVENTS(%rsi)
 +	TRACE_IRQS_OFF
 +	GET_THREAD_INFO(%rcx)	
 +	testb $3,CS-ARGOFFSET(%rsp)
@@ -31354,13 +27091,13 @@
 +	jmp   retint_restore_args
 +
 +error_kernelspace:
-+         /*
-+         * We need to re-write the logic here because we don't do iretq to 
-+         * to return to user mode. It's still possible that we get trap/fault
-+         * in the kernel (when accessing buffers pointed to by system calls, 
-+         * for example).
-+         *
-+         */           
++	/*
++	 * We need to re-write the logic here because we don't do iretq
++	 * to return to user mode. It's still possible that we get trap/fault
++	 * in the kernel (when accessing buffers pointed to by system calls,
++	 * for example).
++	 *
++	 */
 +#if 0
 +	incl %ebx
 +       /* There are two places in the kernel that can potentially fault with
@@ -31377,9 +27114,9 @@
 +	cmpq $gs_change,RIP(%rsp)
 +        je   error_swapgs
 +	jmp  error_sti
-+#endif        
-+END(error_entry)
-+		
++#endif
++KPROBE_END(error_entry)
++
 +ENTRY(hypervisor_callback)
 +	zeroentry do_hypervisor_callback
 +        
@@ -31410,19 +27147,17 @@
 +	decl %gs:pda_irqcount
 +	jmp  error_exit
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
 +KPROBE_ENTRY(nmi)
 +	zeroentry do_nmi_callback
-+ENTRY(do_nmi_callback)
-+        addq $8, %rsp
-+        call do_nmi
-+        orl  $NMI_MASK,EFLAGS(%rsp)
-+        RESTORE_REST
-+        XEN_BLOCK_EVENTS(%rsi)
-+        GET_THREAD_INFO(%rcx)
-+        jmp  retint_restore_args
++ENTRY(xen_do_nmi_callback)
++	addq $8, %rsp
++	call do_nmi
++	orl  $NMI_MASK,EFLAGS(%rsp)
++	RESTORE_REST
++	XEN_BLOCK_EVENTS(%rsi)
++	GET_THREAD_INFO(%rcx)
++	jmp  retint_restore_args
 +	.previous .text
-+#endif
 +
 +        ALIGN
 +restore_all_enable_events:  
@@ -31434,11 +27169,11 @@
 +	XEN_PUT_VCPU_INFO(%rsi)
 +        RESTORE_ARGS 0,8,0
 +        HYPERVISOR_IRET 0
-+        
++
 +14:	XEN_LOCKED_BLOCK_EVENTS(%rsi)
 +	XEN_PUT_VCPU_INFO(%rsi)
 +	SAVE_REST
-+        movq %rsp,%rdi                  # set the argument again
++	movq %rsp,%rdi                  # set the argument again
 +	jmp  11b
 +ecrit:  /**** END OF CRITICAL REGION ****/
 +# At this point, unlike on x86-32, we don't do the fixup to simplify the 
@@ -31571,7 +27306,7 @@
 + * do_sys_execve asm fallback arguments:
 + *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 + */
-+ENTRY(execve)
++ENTRY(kernel_execve)
 +	CFI_STARTPROC
 +	FAKE_STACK_FRAME $0
 +	SAVE_ALL	
@@ -31580,17 +27315,16 @@
 +	RESTORE_REST
 +	testq %rax,%rax
 +	jne 1f
-+        jmp int_ret_from_sys_call
-+1:      RESTORE_ARGS
++	jmp int_ret_from_sys_call
++1:	RESTORE_ARGS
 +	UNFAKE_STACK_FRAME
 +	ret
 +	CFI_ENDPROC
-+ENDPROC(execve)
++ENDPROC(kernel_execve)
 +
 +KPROBE_ENTRY(page_fault)
 +	errorentry do_page_fault
-+END(page_fault)
-+	.previous .text
++KPROBE_END(page_fault)
 +
 +ENTRY(coprocessor_error)
 +	zeroentry do_coprocessor_error
@@ -31612,7 +27346,7 @@
 +	zeroentry do_debug
 +/*	jmp paranoid_exit */
 +	CFI_ENDPROC
-+END(debug)
++KPROBE_END(debug)
 +	.previous .text
 +
 +#if 0
@@ -31628,7 +27362,7 @@
 +	jmp paranoid_exit1
 + 	CFI_ENDPROC
 +#endif
-+END(nmi)
++KPROBE_END(nmi)
 +	.previous .text
 +#endif        
 +
@@ -31639,8 +27373,7 @@
 + 	zeroentry do_int3
 +/* 	jmp paranoid_exit */
 + 	CFI_ENDPROC
-+END(int3)
-+	.previous .text
++KPROBE_END(int3)
 +
 +ENTRY(overflow)
 +	zeroentry do_overflow
@@ -31679,6 +27412,7 @@
 +ENTRY(segment_not_present)
 +	errorentry do_segment_not_present
 +END(segment_not_present)
++
 +	/* runs on exception stack */
 +ENTRY(stack_segment)
 +	XCPT_FRAME
@@ -31688,8 +27422,7 @@
 +
 +KPROBE_ENTRY(general_protection)
 +	errorentry do_general_protection
-+END(general_protection)
-+	.previous .text
++KPROBE_END(general_protection)
 +
 +ENTRY(alignment_check)
 +	errorentry do_alignment_check
@@ -31715,6 +27448,7 @@
 +END(machine_check)
 +#endif
 +
++/* Call softirq on interrupt stack. Interrupts are off. */
 +ENTRY(call_softirq)
 +	CFI_STARTPROC
 +	push %rbp
@@ -31733,43 +27467,10 @@
 +	ret
 +	CFI_ENDPROC
 +ENDPROC(call_softirq)
-+
-+#ifdef CONFIG_STACK_UNWIND
-+ENTRY(arch_unwind_init_running)
-+	CFI_STARTPROC
-+	movq	%r15, R15(%rdi)
-+	movq	%r14, R14(%rdi)
-+	xchgq	%rsi, %rdx
-+	movq	%r13, R13(%rdi)
-+	movq	%r12, R12(%rdi)
-+	xorl	%eax, %eax
-+	movq	%rbp, RBP(%rdi)
-+	movq	%rbx, RBX(%rdi)
-+	movq	(%rsp), %rcx
-+	movq	%rax, R11(%rdi)
-+	movq	%rax, R10(%rdi)
-+	movq	%rax, R9(%rdi)
-+	movq	%rax, R8(%rdi)
-+	movq	%rax, RAX(%rdi)
-+	movq	%rax, RCX(%rdi)
-+	movq	%rax, RDX(%rdi)
-+	movq	%rax, RSI(%rdi)
-+	movq	%rax, RDI(%rdi)
-+	movq	%rax, ORIG_RAX(%rdi)
-+	movq	%rcx, RIP(%rdi)
-+	leaq	8(%rsp), %rcx
-+	movq	$__KERNEL_CS, CS(%rdi)
-+	movq	%rax, EFLAGS(%rdi)
-+	movq	%rcx, RSP(%rdi)
-+	movq	$__KERNEL_DS, SS(%rdi)
-+	jmpq	*%rdx
-+	CFI_ENDPROC
-+ENDPROC(arch_unwind_init_running)
-+#endif
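
One behavioural fix in the entry-xen.S rewrite above is easy to miss: tracesys now preloads -ENOSYS and uses cmova, so a traced task that rewrites the syscall number to something out of range gets -ENOSYS instead of indexing past the table. A C rendering of that clamp (a sketch: the bound and the doubling stand in for the real sys_call_table dispatch):

    #include <stdio.h>

    #define ENOSYS 38
    #define NR_syscall_max 279      /* illustrative bound, not authoritative */

    /* movq $-ENOSYS,%rcx; cmova %rcx,%rax after the cmpq: the default
     * result is -ENOSYS, overwritten only when the number is in range. */
    static long dispatch(unsigned long nr)
    {
            long ret = -ENOSYS;

            if (nr <= NR_syscall_max)
                    ret = (long)(nr * 2);   /* stands in for sys_call_table[nr](...) */
            return ret;
    }

    int main(void)
    {
            printf("%ld %ld\n", dispatch(1), dispatch(5000));   /* 2 -38 */
            return 0;
    }
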
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/genapic_xen.c linux-2.6.18-xen/arch/x86_64/kernel/genapic_xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/genapic_xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/genapic_xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,176 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/genapic_xen.c b/arch/x86_64/kernel/genapic_xen.c
+--- a/arch/x86_64/kernel/genapic_xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/genapic_xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,190 @@
 +/*
 + * Copyright 2004 James Cleverdon, IBM.
 + * Subject to the GNU Public License, v.2
@@ -31843,6 +27544,20 @@
 +	return cpu_online_map;
 +}
 +
++static cpumask_t xen_vector_allocation_domain(int cpu)
++{
++	/* Careful. Some cpus do not strictly honor the set of cpus
++	 * specified in the interrupt destination when using lowest
++	 * priority interrupt delivery mode.
++	 *
++	 * In particular there was a hyperthreading cpu observed to
++	 * deliver interrupts to the wrong hyperthread when only one
++	 * hyperthread was specified in the interrupt destination.
++	 */
++	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
++	return domain;
++}
++
 +/*
 + * Set up the logical destination ID.
 + * Do nothing, not called now.
@@ -31934,8 +27649,8 @@
 +	.int_delivery_mode = dest_LowestPrio,
 +#endif
 +	.int_dest_mode = (APIC_DEST_LOGICAL != 0),
-+	.int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
 +	.target_cpus = xen_target_cpus,
++	.vector_allocation_domain = xen_vector_allocation_domain,
 +#ifdef CONFIG_XEN_PRIVILEGED_GUEST
 +	.apic_id_registered = xen_apic_id_registered,
 +#endif
@@ -31946,9 +27661,9 @@
 +	.cpu_mask_to_apicid = xen_cpu_mask_to_apicid,
 +	.phys_pkg_id = phys_pkg_id,
 +};
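
genapic_xen fills the same function-pointer table the native flat and clustered modes use, so IPI callers never test for Xen explicitly. A minimal sketch of that dispatch pattern (struct and function names here are invented, not the kernel's):

    #include <stdio.h>

    /* One ops table per backend, selected once at boot; every caller
     * goes through the pointer and stays backend-agnostic. */
    struct ipi_ops {
            const char *name;
            void (*send_ipi_all)(int vector);
    };

    static void native_send_ipi_all(int vector)
    {
            printf("native: IPI vector %#x to all cpus\n", vector);
    }

    static void xen_send_ipi_all(int vector)
    {
            printf("xen: event-channel notify for vector %#x\n", vector);
    }

    static const struct ipi_ops native_ops = { "native", native_send_ipi_all };
    static const struct ipi_ops xen_ops    = { "xen",    xen_send_ipi_all };

    static const struct ipi_ops *genapic = &xen_ops;    /* chosen at boot */

    int main(void)
    {
            genapic->send_ipi_all(0xfb);
            return 0;
    }
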
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/genapic-xen.c linux-2.6.18-xen/arch/x86_64/kernel/genapic-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/genapic-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/genapic-xen.c	2006-11-19 14:26:33.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/genapic-xen.c b/arch/x86_64/kernel/genapic-xen.c
+--- a/arch/x86_64/kernel/genapic-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/genapic-xen.c	2007-03-14 10:55:21.000000000 +0100
 @@ -0,0 +1,143 @@
 +/*
 + * Copyright 2004 James Cleverdon, IBM.
@@ -32093,116 +27808,86 @@
 +	xen_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
 +#endif
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/head64-xen.c linux-2.6.18-xen/arch/x86_64/kernel/head64-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/head64-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/head64-xen.c	2006-11-19 14:26:33.000000000 +0100
-@@ -0,0 +1,161 @@
-+/*
-+ *  linux/arch/x86_64/kernel/head64.c -- prepare to run common code
-+ *
-+ *  Copyright (C) 2000 Andrea Arcangeli <andrea at suse.de> SuSE
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
+--- a/arch/x86_64/kernel/head64.c	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/kernel/head64.c	2007-03-14 10:55:21.000000000 +0100
+@@ -2,6 +2,9 @@
+  *  linux/arch/x86_64/kernel/head64.c -- prepare to run common code
+  *
+  *  Copyright (C) 2000 Andrea Arcangeli <andrea at suse.de> SuSE
 + *
 + *  Jun Nakajima <jun.nakajima at intel.com>
 + *	Modified for Xen.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/linkage.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/percpu.h>
+  */
+ 
+ #include <linux/init.h>
+@@ -10,6 +13,7 @@
+ #include <linux/kernel.h>
+ #include <linux/string.h>
+ #include <linux/percpu.h>
 +#include <linux/module.h>
-+
-+#include <asm/processor.h>
-+#include <asm/proto.h>
-+#include <asm/smp.h>
-+#include <asm/bootsetup.h>
-+#include <asm/setup.h>
-+#include <asm/desc.h>
-+#include <asm/pgtable.h>
-+#include <asm/sections.h>
-+
+ 
+ #include <asm/processor.h>
+ #include <asm/proto.h>
+@@ -20,13 +24,19 @@
+ #include <asm/pgtable.h>
+ #include <asm/sections.h>
+ 
++#ifdef CONFIG_XEN
 +unsigned long start_pfn;
-+
-+/* Don't add a printk in there. printk relies on the PDA which is not initialized 
-+   yet. */
-+#if 0
-+static void __init clear_bss(void)
-+{
-+	memset(__bss_start, 0,
-+	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
-+}
 +#endif
 +
-+#define NEW_CL_POINTER		0x228	/* Relative to real mode data */
-+#define OLD_CL_MAGIC_ADDR	0x90020
-+#define OLD_CL_MAGIC            0xA33F
-+#define OLD_CL_BASE_ADDR        0x90000
-+#define OLD_CL_OFFSET           0x90022
-+
-+extern char saved_command_line[];
-+
-+static void __init copy_bootdata(char *real_mode_data)
-+{
+ /* Don't add a printk in there. printk relies on the PDA which is not initialized 
+    yet. */
 +#ifndef CONFIG_XEN
-+	int new_data;
-+	char * command_line;
-+
-+	memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
-+	new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
-+	if (!new_data) {
-+		if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
-+			printk("so old bootloader that it does not support commandline?!\n");
-+			return;
-+		}
-+		new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
-+		printk("old bootloader convention, maybe loadlin?\n");
-+	}
-+	command_line = (char *) ((u64)(new_data));
-+	memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
+ static void __init clear_bss(void)
+ {
+ 	memset(__bss_start, 0,
+ 	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
+ }
++#endif
+ 
+ #define NEW_CL_POINTER		0x228	/* Relative to real mode data */
+ #define OLD_CL_MAGIC_ADDR	0x90020
+@@ -38,6 +48,7 @@
+ 
+ static void __init copy_bootdata(char *real_mode_data)
+ {
++#ifndef CONFIG_XEN
+ 	int new_data;
+ 	char * command_line;
+ 
+@@ -51,26 +62,67 @@
+ 	}
+ 	command_line = (char *) ((u64)(new_data));
+ 	memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
 +#else
 +	int max_cmdline;
-+	
++
 +	if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
 +		max_cmdline = COMMAND_LINE_SIZE;
 +	memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
 +	saved_command_line[max_cmdline-1] = '\0';
 +#endif
-+	printk("Bootdata ok (command line is %s)\n", saved_command_line);
-+}
-+
-+static void __init setup_boot_cpu_data(void)
-+{
-+	unsigned int dummy, eax;
-+
-+	/* get vendor info */
-+	cpuid(0, (unsigned int *)&boot_cpu_data.cpuid_level,
-+	      (unsigned int *)&boot_cpu_data.x86_vendor_id[0],
-+	      (unsigned int *)&boot_cpu_data.x86_vendor_id[8],
-+	      (unsigned int *)&boot_cpu_data.x86_vendor_id[4]);
-+
-+	/* get cpu type */
-+	cpuid(1, &eax, &dummy, &dummy,
-+		(unsigned int *) &boot_cpu_data.x86_capability);
-+	boot_cpu_data.x86 = (eax >> 8) & 0xf;
-+	boot_cpu_data.x86_model = (eax >> 4) & 0xf;
-+	boot_cpu_data.x86_mask = eax & 0xf;
-+}
-+
+ }
+ 
++#ifdef CONFIG_XEN
 +#include <xen/interface/memory.h>
 +unsigned long *machine_to_phys_mapping;
 +EXPORT_SYMBOL(machine_to_phys_mapping);
 +unsigned int machine_to_phys_order;
 +EXPORT_SYMBOL(machine_to_phys_order);
++#endif
 +
-+void __init x86_64_start_kernel(char * real_mode_data)
-+{
+ void __init x86_64_start_kernel(char * real_mode_data)
+ {
++#ifdef CONFIG_XEN
 +	struct xen_machphys_mapping mapping;
 +	unsigned long machine_to_phys_nr_ents;
-+	char *s;
-+	int i;
-+
++#endif
+ 	int i;
+ 
++#ifdef CONFIG_XEN
 +	xen_start_info = (struct start_info *)real_mode_data;
 +	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 +		phys_to_machine_mapping =
@@ -32220,48 +27905,31 @@
 +	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
 +		machine_to_phys_order++;
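
The two lines above size machine_to_phys_order as a ceiling log2 of the entry count reported by the hypervisor. Stand-alone, the computation is (plain C sketch):

    #include <stdio.h>

    /* Smallest order such that (1UL << order) covers nr_ents,
     * exactly as the while loop above computes it. */
    static unsigned int m2p_order(unsigned long nr_ents)
    {
            unsigned int order = 0;

            while ((1UL << order) < nr_ents)
                    order++;
            return order;
    }

    int main(void)
    {
            /* 1 entry -> 0, 1024 -> 10, 1025 -> 11 */
            printf("%u %u %u\n", m2p_order(1), m2p_order(1024), m2p_order(1025));
            return 0;
    }
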
 +
-+#if 0
-+	for (i = 0; i < 256; i++)
-+		set_intr_gate(i, early_idt_handler);
-+	asm volatile("lidt %0" :: "m" (idt_descr));
-+#endif
-+
-+	/*
-+	 * This must be called really, really early:
-+	 */
-+	lockdep_init();
-+
-+ 	for (i = 0; i < NR_CPUS; i++)
-+ 		cpu_pda(i) = &boot_cpu_pda[i];
-+
-+	pda_init(0);
-+	copy_bootdata(real_mode_data);
-+#ifdef CONFIG_SMP
-+	cpu_set(0, cpu_online_map);
-+#endif
-+	s = strstr(saved_command_line, "earlyprintk=");
-+	if (s != NULL)
-+		setup_early_printk(strchr(s, '=') + 1);
-+#ifdef CONFIG_NUMA
-+	s = strstr(saved_command_line, "numa=");
-+	if (s != NULL)
-+		numa_setup(s+5);
-+#endif
-+#ifdef CONFIG_X86_IO_APIC
-+	if (strstr(saved_command_line, "disableapic"))
-+		disable_apic = 1;
++#else
+ 	/* clear bss before set_intr_gate with early_idt_handler */
+ 	clear_bss();
+ 
+ 	for (i = 0; i < IDT_ENTRIES; i++)
+ 		set_intr_gate(i, early_idt_handler);
+ 	asm volatile("lidt %0" :: "m" (idt_descr));
+-
++#endif /* CONFIG_XEN */
+ 	early_printk("Kernel alive\n");
+ 
++#ifndef CONFIG_XEN
+ 	/*
+ 	 * switch to init_level4_pgt from boot_level4_pgt
+ 	 */
+ 	memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD*sizeof(pgd_t));
+ 	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
 +#endif
-+	/* You need early console to see that */
-+	if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
-+		panic("Kernel too big for kernel mapping\n");
-+
-+	setup_boot_cpu_data();
-+	start_kernel();
-+}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/head-xen.S linux-2.6.18-xen/arch/x86_64/kernel/head-xen.S
---- linux-2.6.18.3/arch/x86_64/kernel/head-xen.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/head-xen.S	2006-11-19 14:26:33.000000000 +0100
-@@ -0,0 +1,191 @@
+ 
+  	for (i = 0; i < NR_CPUS; i++)
+  		cpu_pda(i) = &boot_cpu_pda[i];
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/head-xen.S b/arch/x86_64/kernel/head-xen.S
+--- a/arch/x86_64/kernel/head-xen.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/head-xen.S	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,189 @@
 +/*
 + *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 + *
@@ -32270,8 +27938,6 @@
 + *  Copyright (C) 2000 Karsten Keil <kkeil at suse.de>
 + *  Copyright (C) 2001,2002 Andi Kleen <ak at suse.de>
 + *
-+ *  $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
-+ *
 + *  Jun Nakajima <jun.nakajima at intel.com>
 + *    Modified for Xen                                
 + */
@@ -32374,22 +28040,22 @@
 +
 +/* The TLS descriptors are currently at a different place compared to i386.
 +   Hopefully nobody expects them at a fixed place (Wine?) */
-+
++	
 +ENTRY(cpu_gdt_table)
 +	.quad	0x0000000000000000	/* NULL descriptor */
 +	.quad	0x0			/* unused */
 +	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
 +	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
 +	.quad	0x00cffa000000ffff	/* __USER32_CS */
-+	.quad	0x00cff2000000ffff	/* __USER_DS, __USER32_DS  */
++	.quad	0x00cff2000000ffff	/* __USER_DS, __USER32_DS  */		
 +	.quad	0x00affa000000ffff	/* __USER_CS */
 +	.quad	0x00cf9a000000ffff	/* __KERNEL32_CS */
 +	.quad	0,0			/* TSS */
 +	.quad	0,0			/* LDT */
-+	.quad   0,0,0			/* three TLS descriptors */
-+	.quad	0			/* unused */
-+gdt_end:
-+	/* asm/segment.h:GDT_ENTRIES must match this */
++	.quad   0,0,0			/* three TLS descriptors */ 
++	.quad	0x0000f40000000000	/* node/CPU stored in limit */
++gdt_end:	
++	/* asm/segment.h:GDT_ENTRIES must match this */	
 +	/* This should be a multiple of the cache line size */
 +	/* GDTs of other CPUs are now dynamically allocated */
 +
@@ -32453,9 +28119,9 @@
 +	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad,  hypercall_page)
 +	ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,       .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
 +	ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz, "generic")
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/init_task.c linux-2.6.18-xen/arch/x86_64/kernel/init_task.c
---- linux-2.6.18.3/arch/x86_64/kernel/init_task.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/x86_64/kernel/init_task.c	2006-11-19 14:26:33.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
+--- a/arch/x86_64/kernel/init_task.c	2007-03-12 21:58:06.000000000 +0100
++++ b/arch/x86_64/kernel/init_task.c	2007-03-14 10:55:21.000000000 +0100
 @@ -37,6 +37,8 @@
  struct task_struct init_task = INIT_TASK(init_task);
  
@@ -32473,10 +28139,10 @@
  
  /* Copies of the original ist values from the tss are only accessed during
   * debugging, no special alignment required.
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/io_apic-xen.c linux-2.6.18-xen/arch/x86_64/kernel/io_apic-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/io_apic-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/io_apic-xen.c	2006-11-19 14:26:33.000000000 +0100
-@@ -0,0 +1,2263 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/io_apic-xen.c b/arch/x86_64/kernel/io_apic-xen.c
+--- a/arch/x86_64/kernel/io_apic-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/io_apic-xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,2221 @@
 +/*
 + *	Intel IO-APIC support for multi-Pentium hosts.
 + *
@@ -32505,9 +28171,12 @@
 +#include <linux/delay.h>
 +#include <linux/sched.h>
 +#include <linux/smp_lock.h>
++#include <linux/pci.h>
 +#include <linux/mc146818rtc.h>
 +#include <linux/acpi.h>
 +#include <linux/sysdev.h>
++#include <linux/msi.h>
++#include <linux/htirq.h>
 +#ifdef CONFIG_ACPI
 +#include <acpi/acpi_bus.h>
 +#endif
@@ -32520,6 +28189,31 @@
 +#include <asm/acpi.h>
 +#include <asm/dma.h>
 +#include <asm/nmi.h>
++#include <asm/msidef.h>
++#include <asm/hypertransport.h>
++
++DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
++	[0 ... FIRST_EXTERNAL_VECTOR - 1] = -1,
++	[FIRST_EXTERNAL_VECTOR + 0] = 0,
++	[FIRST_EXTERNAL_VECTOR + 1] = 1,
++	[FIRST_EXTERNAL_VECTOR + 2] = 2,
++	[FIRST_EXTERNAL_VECTOR + 3] = 3,
++	[FIRST_EXTERNAL_VECTOR + 4] = 4,
++	[FIRST_EXTERNAL_VECTOR + 5] = 5,
++	[FIRST_EXTERNAL_VECTOR + 6] = 6,
++	[FIRST_EXTERNAL_VECTOR + 7] = 7,
++	[FIRST_EXTERNAL_VECTOR + 8] = 8,
++	[FIRST_EXTERNAL_VECTOR + 9] = 9,
++	[FIRST_EXTERNAL_VECTOR + 10] = 10,
++	[FIRST_EXTERNAL_VECTOR + 11] = 11,
++	[FIRST_EXTERNAL_VECTOR + 12] = 12,
++	[FIRST_EXTERNAL_VECTOR + 13] = 13,
++	[FIRST_EXTERNAL_VECTOR + 14] = 14,
++	[FIRST_EXTERNAL_VECTOR + 15] = 15,
++	[FIRST_EXTERNAL_VECTOR + 16 ... NR_VECTORS - 1] = -1
++};
++
++static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result);
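
The per-CPU vector_irq table above identity-maps the 16 legacy IRQs starting at FIRST_EXTERNAL_VECTOR and marks every other vector free (-1), using GCC's range designators. A condensed sketch of how those initializers compose (values hypothetical, not the kernel's table):

    #include <stdio.h>

    #define FIRST_EXTERNAL_VECTOR 0x20
    #define NR_VECTORS 256

    /* Default everything to -1 ("no IRQ"), then let later designators
     * override the legacy slots.  [a ... b] = v is the GCC range
     * designator extension; overriding an earlier designator is well
     * defined, gcc only warns about it with -Woverride-init. */
    static int vector_irq[NR_VECTORS] = {
            [0 ... NR_VECTORS - 1] = -1,
            [FIRST_EXTERNAL_VECTOR + 0] = 0,
            [FIRST_EXTERNAL_VECTOR + 1] = 1,
            [FIRST_EXTERNAL_VECTOR + 2] = 2,
            /* ... the real table continues the identity map up to 15 ... */
    };

    int main(void)
    {
            printf("vector 0x22 -> irq %d, vector 0xf0 -> irq %d\n",
                   vector_irq[0x22], vector_irq[0xf0]);     /* 2 and -1 */
            return 0;
    }
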
 +
 +#define __apicdebuginit  __init
 +
@@ -32527,17 +28221,17 @@
 +
 +static int no_timer_check;
 +
-+int disable_timer_pin_1 __initdata;
++static int disable_timer_pin_1 __initdata;
 +
-+#ifndef CONFIG_XEN
-+int timer_over_8254 __initdata = 0;
++int timer_over_8254 __initdata = 1;
 +
++#ifndef CONFIG_XEN
 +/* Where if anywhere is the i8259 connect in external int mode */
 +static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 +#endif
 +
 +static DEFINE_SPINLOCK(ioapic_lock);
-+static DEFINE_SPINLOCK(vector_lock);
++DEFINE_SPINLOCK(vector_lock);
 +
 +/*
 + * # of IRQ routing registers
@@ -32562,13 +28256,51 @@
 +	short apic, pin, next;
 +} irq_2_pin[PIN_MAP_SIZE];
 +
-+int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
-+#ifdef CONFIG_PCI_MSI
-+#define vector_to_irq(vector) 	\
-+	(platform_legacy_irq(vector) ? vector : vector_irq[vector])
-+#else
-+#define vector_to_irq(vector)	(vector)
-+#endif
++struct io_apic {
++	unsigned int index;
++	unsigned int unused[3];
++	unsigned int data;
++};
++
++static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
++{
++	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
++		+ (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
++}
++
++static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
++{
++	struct io_apic __iomem *io_apic = io_apic_base(apic);
++	writel(reg, &io_apic->index);
++	return readl(&io_apic->data);
++}
++
++static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++	struct io_apic __iomem *io_apic = io_apic_base(apic);
++	writel(reg, &io_apic->index);
++	writel(value, &io_apic->data);
++}
++
++/*
++ * Re-write a value: to be used for read-modify-write
++ * cycles where the read already set up the index register.
++ */
++static inline void io_apic_modify(unsigned int apic, unsigned int value)
++{
++	struct io_apic __iomem *io_apic = io_apic_base(apic);
++	writel(value, &io_apic->data);
++}
++
++/*
++ * Synchronize the IO-APIC and the CPU by doing
++ * a dummy read from the IO-APIC
++ */
++static inline void io_apic_sync(unsigned int apic)
++{
++	struct io_apic __iomem *io_apic = io_apic_base(apic);
++	readl(&io_apic->data);
++}
 +
 +#ifdef CONFIG_XEN
 +
@@ -32605,19 +28337,122 @@
 +	HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
 +}
 +
-+#define io_apic_read(a,r)    xen_io_apic_read(a,r)
-+#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++#define io_apic_read(a,r)    xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#define clear_IO_APIC() ((void)0)
++#endif
++
++#ifndef CONFIG_XEN
++#define __DO_ACTION(R, ACTION, FINAL)					\
++									\
++{									\
++	int pin;							\
++	struct irq_pin_list *entry = irq_2_pin + irq;			\
++									\
++	BUG_ON(irq >= NR_IRQS);						\
++	for (;;) {							\
++		unsigned int reg;					\
++		pin = entry->pin;					\
++		if (pin == -1)						\
++			break;						\
++		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
++		reg ACTION;						\
++		io_apic_modify(entry->apic, reg);			\
++		if (!entry->next)					\
++			break;						\
++		entry = irq_2_pin + entry->next;			\
++	}								\
++	FINAL;								\
++}
++#endif /* !CONFIG_XEN */
++
++union entry_union {
++	struct { u32 w1, w2; };
++	struct IO_APIC_route_entry entry;
++};
++
++static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
++{
++	union entry_union eu;
++	unsigned long flags;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
++	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++	return eu.entry;
++}
++
++/*
++ * When we write a new IO APIC routing entry, we need to write the high
++ * word first! If the mask bit in the low word is clear, we will enable
++ * the interrupt, and we need to make sure the entry is fully populated
++ * before that happens.
++ */
++static void
++__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
++{
++	union entry_union eu;
++	eu.entry = e;
++	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
++	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
++}
++
++static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
++{
++	unsigned long flags;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__ioapic_write_entry(apic, pin, e);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++/*
++ * When we mask an IO APIC routing entry, we need to write the low
++ * word first, in order to set the mask bit before we change the
++ * high bits!
++ */
++#ifndef CONFIG_XEN
++static void ioapic_mask_entry(int apic, int pin)
++{
++	unsigned long flags;
++	union entry_union eu = { .entry.mask = 1 };
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
++	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
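
The entry helpers above encode a hardware ordering rule: when arming an entry, write the high word (register 0x11 + 2*pin) first so the entry is fully populated before the mask bit in the low word can enable it; when masking, write the low word first so the mask bit is set before the high bits change. A user-space sketch of the entry_union idea, with an abbreviated field layout (not the full IO_APIC_route_entry format; build with -std=c11 for the anonymous struct):

    #include <stdint.h>
    #include <stdio.h>

    /* One 64-bit routing entry viewed either as two 32-bit register
     * words or as fields. */
    struct route_entry {
            uint32_t vector   :  8;
            uint32_t delivery :  3;
            uint32_t          :  5;
            uint32_t mask     :  1;     /* bit 16 of the low word */
            uint32_t          : 15;
            uint32_t dest;
    };

    union entry_union {
            struct { uint32_t w1, w2; };    /* w1: reg 0x10+2*pin, w2: reg 0x11+2*pin */
            struct route_entry entry;
    };

    int main(void)
    {
            union entry_union eu = { .entry = {
                    .vector = 0x31, .mask = 0, .dest = 0x01000000 } };

            /* Arming: high word first, entry whole before it can fire. */
            printf("write 0x11 <- %#010x, then 0x10 <- %#010x\n", eu.w2, eu.w1);

            /* Masking: low word first, mask set before high bits move. */
            eu.entry.mask = 1;
            printf("write 0x10 <- %#010x, then 0x11 <- %#010x\n", eu.w1, eu.w2);
            return 0;
    }
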
 +
-+#define clear_IO_APIC() ((void)0)
++#ifdef CONFIG_SMP
++static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
++{
++	int apic, pin;
++	struct irq_pin_list *entry = irq_2_pin + irq;
 +
-+#else
++	BUG_ON(irq >= NR_IRQS);
++	for (;;) {
++		unsigned int reg;
++		apic = entry->apic;
++		pin = entry->pin;
++		if (pin == -1)
++			break;
++		io_apic_write(apic, 0x11 + pin*2, dest);
++		reg = io_apic_read(apic, 0x10 + pin*2);
++		reg &= ~0x000000ff;
++		reg |= vector;
++		io_apic_modify(apic, reg);
++		if (!entry->next)
++			break;
++		entry = irq_2_pin + entry->next;
++	}
++}
 +
-+#ifdef CONFIG_SMP
 +static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 +{
 +	unsigned long flags;
 +	unsigned int dest;
 +	cpumask_t tmp;
++	int vector;
 +
 +	cpus_and(tmp, mask, cpu_online_map);
 +	if (cpus_empty(tmp))
@@ -32625,7 +28460,11 @@
 +
 +	cpus_and(mask, tmp, CPU_MASK_ALL);
 +
-+	dest = cpu_mask_to_apicid(mask);
++	vector = assign_irq_vector(irq, mask, &tmp);
++	if (vector < 0)
++		return;
++
++	dest = cpu_mask_to_apicid(tmp);
 +
 +	/*
 +	 * Only the high 8 bits are valid.
@@ -32633,12 +28472,11 @@
 +	dest = SET_APIC_LOGICAL_ID(dest);
 +
 +	spin_lock_irqsave(&ioapic_lock, flags);
-+	__DO_ACTION(1, = dest, )
-+	set_irq_info(irq, mask);
++	__target_IO_APIC_irq(irq, dest, vector);
++	set_native_irq_info(irq, mask);
 +	spin_unlock_irqrestore(&ioapic_lock, flags);
 +}
 +#endif
-+
 +#endif /* !CONFIG_XEN */
 +
 +/*
@@ -32666,27 +28504,6 @@
 +}
 +
 +#ifndef CONFIG_XEN
-+#define __DO_ACTION(R, ACTION, FINAL)					\
-+									\
-+{									\
-+	int pin;							\
-+	struct irq_pin_list *entry = irq_2_pin + irq;			\
-+									\
-+	BUG_ON(irq >= NR_IRQS);						\
-+	for (;;) {							\
-+		unsigned int reg;					\
-+		pin = entry->pin;					\
-+		if (pin == -1)						\
-+			break;						\
-+		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
-+		reg ACTION;						\
-+		io_apic_modify(entry->apic, reg);			\
-+		if (!entry->next)					\
-+			break;						\
-+		entry = irq_2_pin + entry->next;			\
-+	}								\
-+	FINAL;								\
-+}
 +
 +#define DO_ACTION(name,R,ACTION, FINAL)					\
 +									\
@@ -32719,24 +28536,15 @@
 +static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
 +{
 +	struct IO_APIC_route_entry entry;
-+	unsigned long flags;
 +
 +	/* Check delivery_mode to be sure we're not clearing an SMI pin */
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	*(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+	*(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++	entry = ioapic_read_entry(apic, pin);
 +	if (entry.delivery_mode == dest_SMI)
 +		return;
 +	/*
 +	 * Disable it in the IO-APIC irq-routing table:
 +	 */
-+	memset(&entry, 0, sizeof(entry));
-+	entry.mask = 1;
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
-+	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++	ioapic_mask_entry(apic, pin);
 +}
 +
 +static void clear_IO_APIC (void)
@@ -32749,17 +28557,6 @@
 +}
 +
 +#endif /* !CONFIG_XEN */
-+
-+static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
-+
-+/*
-+ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
-+ * specific CPU-side IRQs.
-+ */
-+
-+#define MAX_PIRQS 8
-+static int pirq_entries [MAX_PIRQS];
-+static int pirqs_enabled;
 +int skip_ioapic_setup;
 +int ioapic_force;
 +
@@ -32768,20 +28565,18 @@
 +static int __init disable_ioapic_setup(char *str)
 +{
 +	skip_ioapic_setup = 1;
-+	return 1;
++	return 0;
 +}
++early_param("noapic", disable_ioapic_setup);
 +
-+static int __init enable_ioapic_setup(char *str)
++/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
++static int __init disable_timer_pin_setup(char *arg)
 +{
-+	ioapic_force = 1;
-+	skip_ioapic_setup = 0;
++	disable_timer_pin_1 = 1;
 +	return 1;
 +}
++__setup("disable_timer_pin_1", disable_timer_pin_setup);
 +
-+__setup("noapic", disable_ioapic_setup);
-+__setup("apic", enable_ioapic_setup);
-+
-+#ifndef CONFIG_XEN
 +static int __init setup_disable_8254_timer(char *s)
 +{
 +	timer_over_8254 = -1;
@@ -32795,139 +28590,7 @@
 +
 +__setup("disable_8254_timer", setup_disable_8254_timer);
 +__setup("enable_8254_timer", setup_enable_8254_timer);
-+#endif /* !CONFIG_XEN */
-+
-+#include <asm/pci-direct.h>
-+#include <linux/pci_ids.h>
-+#include <linux/pci.h>
-+
-+
-+#ifdef CONFIG_ACPI
-+
-+static int nvidia_hpet_detected __initdata;
-+
-+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
-+{
-+	nvidia_hpet_detected = 1;
-+	return 0;
-+}
-+#endif
-+
-+/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
-+   off. Check for an Nvidia or VIA PCI bridge and turn it off.
-+   Use pci direct infrastructure because this runs before the PCI subsystem. 
-+
-+   Can be overwritten with "apic"
-+
-+   And another hack to disable the IOMMU on VIA chipsets.
-+
-+   ... and others. Really should move this somewhere else.
-+
-+   Kludge-O-Rama. */
-+void __init check_ioapic(void) 
-+{ 
-+	int num,slot,func; 
-+	/* Poor man's PCI discovery */
-+	for (num = 0; num < 32; num++) { 
-+		for (slot = 0; slot < 32; slot++) { 
-+			for (func = 0; func < 8; func++) { 
-+				u32 class;
-+				u32 vendor;
-+				u8 type;
-+				class = read_pci_config(num,slot,func,
-+							PCI_CLASS_REVISION);
-+				if (class == 0xffffffff)
-+					break; 
-+
-+		       		if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
-+					continue; 
-+
-+				vendor = read_pci_config(num, slot, func, 
-+							 PCI_VENDOR_ID);
-+				vendor &= 0xffff;
-+				switch (vendor) { 
-+				case PCI_VENDOR_ID_VIA:
-+#ifdef CONFIG_IOMMU
-+					if ((end_pfn > MAX_DMA32_PFN ||
-+					     force_iommu) &&
-+					    !iommu_aperture_allowed) {
-+						printk(KERN_INFO
-+    "Looks like a VIA chipset. Disabling IOMMU. Override with \"iommu=allowed\"\n");
-+						iommu_aperture_disabled = 1;
-+					}
-+#endif
-+					return;
-+				case PCI_VENDOR_ID_NVIDIA:
-+#ifdef CONFIG_ACPI
-+					/*
-+					 * All timer overrides on Nvidia are
-+					 * wrong unless HPET is enabled.
-+					 */
-+					nvidia_hpet_detected = 0;
-+					acpi_table_parse(ACPI_HPET,
-+							nvidia_hpet_check);
-+					if (nvidia_hpet_detected == 0) {
-+						acpi_skip_timer_override = 1;
-+						printk(KERN_INFO "Nvidia board "
-+						    "detected. Ignoring ACPI "
-+						    "timer override.\n");
-+					}
-+#endif
-+					/* RED-PEN skip them on mptables too? */
-+					return;
-+				case PCI_VENDOR_ID_ATI:
-+
-+				/* This should be actually default, but
-+				   for 2.6.16 let's do it for ATI only where
-+				   it's really needed. */
-+#ifndef CONFIG_XEN
-+					if (timer_over_8254 == 1) {	
-+						timer_over_8254 = 0;	
-+					printk(KERN_INFO
-+		"ATI board detected. Disabling timer routing over 8254.\n");
-+					}	
-+#endif
-+					return;
-+				} 
-+
-+
-+				/* No multi-function device? */
-+				type = read_pci_config_byte(num,slot,func,
-+							    PCI_HEADER_TYPE);
-+				if (!(type & 0x80))
-+					break;
-+			} 
-+		}
-+	}
-+} 
-+
-+static int __init ioapic_pirq_setup(char *str)
-+{
-+	int i, max;
-+	int ints[MAX_PIRQS+1];
-+
-+	get_options(str, ARRAY_SIZE(ints), ints);
-+
-+	for (i = 0; i < MAX_PIRQS; i++)
-+		pirq_entries[i] = -1;
-+
-+	pirqs_enabled = 1;
-+	apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
-+	max = MAX_PIRQS;
-+	if (ints[0] < MAX_PIRQS)
-+		max = ints[0];
-+
-+	for (i = 0; i < max; i++) {
-+		apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
-+		/*
-+		 * PIRQs are mapped upside down, usually.
-+		 */
-+		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
-+	}
-+	return 1;
-+}
 +
-+__setup("pirq=", ioapic_pirq_setup);
 +
 +/*
 + * Find the IRQ entry number of a certain pin.
@@ -32957,9 +28620,7 @@
 +	for (i = 0; i < mp_irq_entries; i++) {
 +		int lbus = mp_irqs[i].mpc_srcbus;
 +
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++		if (test_bit(lbus, mp_bus_not_pci) &&
 +		    (mp_irqs[i].mpc_irqtype == type) &&
 +		    (mp_irqs[i].mpc_srcbusirq == irq))
 +
@@ -32975,9 +28636,7 @@
 +	for (i = 0; i < mp_irq_entries; i++) {
 +		int lbus = mp_irqs[i].mpc_srcbus;
 +
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++		if (test_bit(lbus, mp_bus_not_pci) &&
 +		    (mp_irqs[i].mpc_irqtype == type) &&
 +		    (mp_irqs[i].mpc_srcbusirq == irq))
 +			break;
@@ -33018,7 +28677,7 @@
 +			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
 +				break;
 +
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++		if (!test_bit(lbus, mp_bus_not_pci) &&
 +		    !mp_irqs[i].mpc_irqtype &&
 +		    (bus == lbus) &&
 +		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
@@ -33041,27 +28700,6 @@
 +	return best_guess;
 +}
 +
-+/*
-+ * EISA Edge/Level control register, ELCR
-+ */
-+static int EISA_ELCR(unsigned int irq)
-+{
-+	if (irq < 16) {
-+		unsigned int port = 0x4d0 + (irq >> 3);
-+		return (inb(port) >> (irq & 7)) & 1;
-+	}
-+	apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
-+	return 0;
-+}
-+
-+/* EISA interrupts are always polarity zero and can be edge or level
-+ * trigger depending on the ELCR value.  If an interrupt is listed as
-+ * EISA conforming in the MP table, that means its trigger type must
-+ * be read in from the ELCR */
-+
-+#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
-+#define default_EISA_polarity(idx)	(0)
-+
 +/* ISA interrupts are always polarity zero edge triggered,
 + * when listed as conforming in the MP table. */
 +
@@ -33074,12 +28712,6 @@
 +#define default_PCI_trigger(idx)	(1)
 +#define default_PCI_polarity(idx)	(1)
 +
-+/* MCA interrupts are always polarity zero level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_MCA_trigger(idx)	(1)
-+#define default_MCA_polarity(idx)	(0)
-+
 +static int __init MPBIOS_polarity(int idx)
 +{
 +	int bus = mp_irqs[idx].mpc_srcbus;
@@ -33091,38 +28723,11 @@
 +	switch (mp_irqs[idx].mpc_irqflag & 3)
 +	{
 +		case 0: /* conforms, ie. bus-type dependent polarity */
-+		{
-+			switch (mp_bus_id_to_type[bus])
-+			{
-+				case MP_BUS_ISA: /* ISA pin */
-+				{
-+					polarity = default_ISA_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_EISA: /* EISA pin */
-+				{
-+					polarity = default_EISA_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_PCI: /* PCI pin */
-+				{
-+					polarity = default_PCI_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_MCA: /* MCA pin */
-+				{
-+					polarity = default_MCA_polarity(idx);
-+					break;
-+				}
-+				default:
-+				{
-+					printk(KERN_WARNING "broken BIOS!!\n");
-+					polarity = 1;
-+					break;
-+				}
-+			}
++			if (test_bit(bus, mp_bus_not_pci))
++				polarity = default_ISA_polarity(idx);
++			else
++				polarity = default_PCI_polarity(idx);
 +			break;
-+		}
 +		case 1: /* high active */
 +		{
 +			polarity = 0;
@@ -33160,38 +28765,11 @@
 +	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
 +	{
 +		case 0: /* conforms, ie. bus-type dependent */
-+		{
-+			switch (mp_bus_id_to_type[bus])
-+			{
-+				case MP_BUS_ISA: /* ISA pin */
-+				{
-+					trigger = default_ISA_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_EISA: /* EISA pin */
-+				{
-+					trigger = default_EISA_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_PCI: /* PCI pin */
-+				{
-+					trigger = default_PCI_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_MCA: /* MCA pin */
-+				{
-+					trigger = default_MCA_trigger(idx);
-+					break;
-+				}
-+				default:
-+				{
-+					printk(KERN_WARNING "broken BIOS!!\n");
-+					trigger = 1;
-+					break;
-+				}
-+			}
++			if (test_bit(bus, mp_bus_not_pci))
++				trigger = default_ISA_trigger(idx);
++			else
++				trigger = default_PCI_trigger(idx);
 +			break;
-+		}
 +		case 1: /* edge */
 +		{
 +			trigger = 0;
@@ -33228,64 +28806,6 @@
 +	return MPBIOS_trigger(idx);
 +}
 +
-+static int next_irq = 16;
-+
-+/*
-+ * gsi_irq_sharing -- Name overload!  "irq" can be either a legacy IRQ
-+ * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
-+ * from ACPI, which can reach 800 in large boxen.
-+ *
-+ * Compact the sparse GSI space into a sequential IRQ series and reuse
-+ * vectors if possible.
-+ */
-+int gsi_irq_sharing(int gsi)
-+{
-+	int i, tries, vector;
-+
-+	BUG_ON(gsi >= NR_IRQ_VECTORS);
-+
-+	if (platform_legacy_irq(gsi))
-+		return gsi;
-+
-+	if (gsi_2_irq[gsi] != 0xFF)
-+		return (int)gsi_2_irq[gsi];
-+
-+	tries = NR_IRQS;
-+  try_again:
-+	vector = assign_irq_vector(gsi);
-+
-+	/*
-+	 * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
-+	 * use of vector and if found, return that IRQ.  However, we never want
-+	 * to share legacy IRQs, which usually have a different trigger mode
-+	 * than PCI.
-+	 */
-+	for (i = 0; i < NR_IRQS; i++)
-+		if (IO_APIC_VECTOR(i) == vector)
-+			break;
-+	if (platform_legacy_irq(i)) {
-+		if (--tries >= 0) {
-+			IO_APIC_VECTOR(i) = 0;
-+			goto try_again;
-+		}
-+		panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
-+	}
-+	if (i < NR_IRQS) {
-+		gsi_2_irq[gsi] = i;
-+		printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
-+				gsi, vector, i);
-+		return i;
-+	}
-+
-+	i = next_irq++;
-+	BUG_ON(i >= NR_IRQS);
-+	gsi_2_irq[gsi] = i;
-+	IO_APIC_VECTOR(i) = vector;
-+	printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
-+			gsi, vector, i);
-+	return i;
-+}
-+
 +static int pin_2_irq(int idx, int apic, int pin)
 +{
 +	int irq, i;
@@ -33297,49 +28817,16 @@
 +	if (mp_irqs[idx].mpc_dstirq != pin)
 +		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
 +
-+	switch (mp_bus_id_to_type[bus])
-+	{
-+		case MP_BUS_ISA: /* ISA pin */
-+		case MP_BUS_EISA:
-+		case MP_BUS_MCA:
-+		{
-+			irq = mp_irqs[idx].mpc_srcbusirq;
-+			break;
-+		}
-+		case MP_BUS_PCI: /* PCI pin */
-+		{
-+			/*
-+			 * PCI IRQs are mapped in order
-+			 */
-+			i = irq = 0;
-+			while (i < apic)
-+				irq += nr_ioapic_registers[i++];
-+			irq += pin;
-+			irq = gsi_irq_sharing(irq);
-+			break;
-+		}
-+		default:
-+		{
-+			printk(KERN_ERR "unknown bus type %d.\n",bus); 
-+			irq = 0;
-+			break;
-+		}
-+	}
-+	BUG_ON(irq >= NR_IRQS);
-+
-+	/*
-+	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
-+	 */
-+	if ((pin >= 16) && (pin <= 23)) {
-+		if (pirq_entries[pin-16] != -1) {
-+			if (!pirq_entries[pin-16]) {
-+				apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
-+			} else {
-+				irq = pirq_entries[pin-16];
-+				apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
-+						pin-16, irq);
-+			}
-+		}
++	if (test_bit(bus, mp_bus_not_pci)) {
++		irq = mp_irqs[idx].mpc_srcbusirq;
++	} else {
++		/*
++		 * PCI IRQs are mapped in order
++		 */
++		i = irq = 0;
++		while (i < apic)
++			irq += nr_ioapic_registers[i++];
++		irq += pin;
 +	}
 +	BUG_ON(irq >= NR_IRQS);
 +	return irq;
@@ -33363,37 +28850,106 @@
 +}
 +
 +/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-+u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
++static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
 +
-+int assign_irq_vector(int irq)
++static cpumask_t irq_domain[NR_IRQ_VECTORS] __read_mostly = {
++	[0] = CPU_MASK_ALL,
++	[1] = CPU_MASK_ALL,
++	[2] = CPU_MASK_ALL,
++	[3] = CPU_MASK_ALL,
++	[4] = CPU_MASK_ALL,
++	[5] = CPU_MASK_ALL,
++	[6] = CPU_MASK_ALL,
++	[7] = CPU_MASK_ALL,
++	[8] = CPU_MASK_ALL,
++	[9] = CPU_MASK_ALL,
++	[10] = CPU_MASK_ALL,
++	[11] = CPU_MASK_ALL,
++	[12] = CPU_MASK_ALL,
++	[13] = CPU_MASK_ALL,
++	[14] = CPU_MASK_ALL,
++	[15] = CPU_MASK_ALL,
++};
++
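
Aside: the sixteen hand-written CPU_MASK_ALL entries above could be expressed
with GCC's designated range initializer, which the kernel already uses
elsewhere; the result is identical:

    static cpumask_t irq_domain[NR_IRQ_VECTORS] __read_mostly = {
        [0 ... 15] = CPU_MASK_ALL,    /* the 16 legacy ISA vectors */
    };
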
++
++static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 +{
 +	struct physdev_irq irq_op;
-+	unsigned long flags;
-+
-+  	BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
++	int vector;
 +
-+	spin_lock_irqsave(&vector_lock, flags);
++	BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
 +
-+	if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
-+		spin_unlock_irqrestore(&vector_lock, flags);
-+  		return IO_APIC_VECTOR(irq);
++	if (irq_vector[irq] > 0) {
++		cpus_and(*result, irq_domain[irq], mask);
++		return irq_vector[irq];
 +	}
 +	irq_op.irq = irq;
 +	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
 +		return -ENOSPC;
 +
-+	vector_irq[irq_op.vector] = irq;
-+	if (irq != AUTO_ASSIGN)
-+		IO_APIC_VECTOR(irq) = irq_op.vector;
++	vector = irq_op.vector;
++	per_cpu(vector_irq,0)[vector] = irq;
++	irq_vector[irq] = vector;
++	cpus_and(*result, irq_domain[irq], mask);
++
++	return vector;
++}
++
++static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
++{
++	int vector;
++	unsigned long flags;
 +
++	spin_lock_irqsave(&vector_lock, flags);
++	vector = __assign_irq_vector(irq, mask, result);
 +	spin_unlock_irqrestore(&vector_lock, flags);
-+	return irq_op.vector;
++	return vector;
++}
++
++static void __clear_irq_vector(int irq)
++{
++	cpumask_t mask;
++	int cpu, vector;
++
++	BUG_ON(!irq_vector[irq]);
++
++	vector = irq_vector[irq];
++	cpus_and(mask, irq_domain[irq], cpu_online_map);
++	for_each_cpu_mask(cpu, mask)
++		per_cpu(vector_irq, cpu)[vector] = -1;
++
++	irq_vector[irq] = 0;
++	irq_domain[irq] = CPU_MASK_NONE;
++}
++
++void __setup_vector_irq(int cpu)
++{
++	/* Initialize vector_irq on a new cpu */
++	/* This function must be called with vector_lock held */
++	int irq, vector;
++
++	/* Mark the inuse vectors */
++	for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) {
++		if (!cpu_isset(cpu, irq_domain[irq]))
++			continue;
++		vector = irq_vector[irq];
++		per_cpu(vector_irq, cpu)[vector] = irq;
++	}
++	/* Mark the free vectors */
++	for (vector = 0; vector < NR_VECTORS; ++vector) {
++		irq = per_cpu(vector_irq, cpu)[vector];
++		if (irq < 0)
++			continue;
++		if (!cpu_isset(cpu, irq_domain[irq]))
++			per_cpu(vector_irq, cpu)[vector] = -1;
++	}
 +}
 +
++
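
These per-cpu vector_irq tables are what the low-level entry path consults to
turn a hardware vector back into a Linux irq number. Roughly, for the native
case -- a simplified sketch of the 2.6.20 x86_64 do_IRQ, not part of this
patch:

    /* Sketch: interrupt entry resolves irq via the per-cpu vector table. */
    asmlinkage unsigned int sketch_do_IRQ(struct pt_regs *regs)
    {
        unsigned vector = ~regs->orig_rax;    /* entry stubs push ~vector */
        unsigned irq = __get_cpu_var(vector_irq)[vector];

        irq_enter();
        generic_handle_irq(irq);    /* dispatch via the irq_chip layer */
        irq_exit();
        return 1;
    }
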
 +extern void (*interrupt[NR_IRQS])(void);
++
 +#ifndef CONFIG_XEN
-+static struct hw_interrupt_type ioapic_level_type;
-+static struct hw_interrupt_type ioapic_edge_type;
++static struct irq_chip ioapic_chip;
 +
 +#define IOAPIC_AUTO	-1
 +#define IOAPIC_EDGE	0
@@ -33401,41 +28957,79 @@
 +
 +static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 +{
-+	unsigned idx;
-+
-+	idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
-+
 +	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
 +			trigger == IOAPIC_LEVEL)
-+		irq_desc[idx].chip = &ioapic_level_type;
-+	else
-+		irq_desc[idx].chip = &ioapic_edge_type;
-+	set_intr_gate(vector, interrupt[idx]);
++		set_irq_chip_and_handler_name(irq, &ioapic_chip,
++					      handle_fasteoi_irq, "fasteoi");
++	else {
++		irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
++		set_irq_chip_and_handler_name(irq, &ioapic_chip,
++					      handle_edge_irq, "edge");
++	}
 +}
 +#else
 +#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
 +#endif /* !CONFIG_XEN */
 +
-+static void __init setup_IO_APIC_irqs(void)
++static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
 +{
 +	struct IO_APIC_route_entry entry;
-+	int apic, pin, idx, irq, first_notcon = 1, vector;
++	int vector;
 +	unsigned long flags;
 +
-+	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 +
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++	/*
++	 * add it to the IO-APIC irq-routing table:
++	 */
++	memset(&entry,0,sizeof(entry));
 +
-+		/*
-+		 * add it to the IO-APIC irq-routing table:
-+		 */
-+		memset(&entry,0,sizeof(entry));
++	entry.delivery_mode = INT_DELIVERY_MODE;
++	entry.dest_mode = INT_DEST_MODE;
++	entry.mask = 0;				/* enable IRQ */
++	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
 +
-+		entry.delivery_mode = INT_DELIVERY_MODE;
-+		entry.dest_mode = INT_DEST_MODE;
-+		entry.mask = 0;				/* enable IRQ */
++	entry.trigger = irq_trigger(idx);
++	entry.polarity = irq_polarity(idx);
++
++	if (irq_trigger(idx)) {
++		entry.trigger = 1;
++		entry.mask = 1;
 +		entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++	}
++
++	if (!apic && !IO_APIC_IRQ(irq))
++		return;
++
++	if (IO_APIC_IRQ(irq)) {
++		cpumask_t mask;
++		vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
++		if (vector < 0)
++			return;
++
++		entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
++		entry.vector = vector;
++
++		ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++		if (!apic && (irq < 16))
++			disable_8259A_irq(irq);
++	}
++
++	ioapic_write_entry(apic, pin, entry);
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	set_native_irq_info(irq, TARGET_CPUS);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++}
++
++static void __init setup_IO_APIC_irqs(void)
++{
++	int apic, pin, idx, irq, first_notcon = 1;
++
++	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++	for (apic = 0; apic < nr_ioapics; apic++) {
++	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
 +
 +		idx = find_irq_entry(apic,pin,mp_INT);
 +		if (idx == -1) {
@@ -33447,34 +29041,11 @@
 +			continue;
 +		}
 +
-+		entry.trigger = irq_trigger(idx);
-+		entry.polarity = irq_polarity(idx);
-+
-+		if (irq_trigger(idx)) {
-+			entry.trigger = 1;
-+			entry.mask = 1;
-+			entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+		}
-+
 +		irq = pin_2_irq(idx, apic, pin);
 +		add_pin_to_irq(irq, apic, pin);
 +
-+		if (/* !apic && */ !IO_APIC_IRQ(irq))
-+			continue;
-+
-+		if (IO_APIC_IRQ(irq)) {
-+			vector = assign_irq_vector(irq);
-+			entry.vector = vector;
++		setup_IO_APIC_irq(apic, pin, idx, irq);
 +
-+			ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-+			if (!apic && (irq < 16))
-+				disable_8259A_irq(irq);
-+		}
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-+		io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-+		set_native_irq_info(irq, TARGET_CPUS);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
 +	}
 +	}
 +
@@ -33515,7 +29086,7 @@
 +	 * The timer IRQ doesn't have to know that behind the
 +	 * scene we have a 8259A-master in AEOI mode ...
 +	 */
-+	irq_desc[0].chip = &ioapic_edge_type;
++	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
 +
 +	/*
 +	 * Add it to the IO-APIC irq-routing table:
@@ -33611,10 +29182,7 @@
 +	for (i = 0; i <= reg_01.bits.entries; i++) {
 +		struct IO_APIC_route_entry entry;
 +
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		*(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
-+		*(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
++		entry = ioapic_read_entry(apic, i);
 +
 +		printk(KERN_DEBUG " %02x %03X %02X  ",
 +			i,
@@ -33634,17 +29202,12 @@
 +		);
 +	}
 +	}
-+	if (use_pci_vector())
-+		printk(KERN_INFO "Using vector-based indexing\n");
 +	printk(KERN_DEBUG "IRQ to pin mappings:\n");
 +	for (i = 0; i < NR_IRQS; i++) {
 +		struct irq_pin_list *entry = irq_2_pin + i;
 +		if (entry->pin < 0)
 +			continue;
-+ 		if (use_pci_vector() && !platform_legacy_irq(i))
-+			printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
-+		else
-+			printk(KERN_DEBUG "IRQ%d ", i);
++		printk(KERN_DEBUG "IRQ%d ", i);
 +		for (;;) {
 +			printk("-> %d:%d", entry->apic, entry->pin);
 +			if (!entry->next)
@@ -33815,9 +29378,6 @@
 +		irq_2_pin[i].pin = -1;
 +		irq_2_pin[i].next = 0;
 +	}
-+	if (!pirqs_enabled)
-+		for (i = 0; i < MAX_PIRQS; i++)
-+			pirq_entries[i] = -1;
 +
 +	/*
 +	 * The number of IO-APIC IRQ registers (== #pins):
@@ -33834,11 +29394,7 @@
 +		/* See if any of the pins is in ExtINT mode */
 +		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
 +			struct IO_APIC_route_entry entry;
-+			spin_lock_irqsave(&ioapic_lock, flags);
-+			*(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+			*(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+			spin_unlock_irqrestore(&ioapic_lock, flags);
-+
++			entry = ioapic_read_entry(apic, pin);
 +
 +			/* If the interrupt line is enabled and in ExtInt mode
 +			 * I have found the pin where the i8259 is connected.
@@ -33892,7 +29448,6 @@
 +	 */
 +	if (ioapic_i8259.pin != -1) {
 +		struct IO_APIC_route_entry entry;
-+		unsigned long flags;
 +
 +		memset(&entry, 0, sizeof(entry));
 +		entry.mask            = 0; /* Enabled */
@@ -33909,12 +29464,7 @@
 +		/*
 +		 * Add it to the IO-APIC irq-routing table:
 +		 */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
-+			*(((int *)&entry)+1));
-+		io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
-+			*(((int *)&entry)+0));
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
++		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
 +	}
 +
 +	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
@@ -33922,76 +29472,6 @@
 +}
 +
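
ioapic_read_entry()/ioapic_write_entry(), used in this and several earlier
hunks, replace the open-coded io_apic_read()/io_apic_write() pairs at
registers 0x10+2*pin and 0x11+2*pin. Their bodies are not part of this
excerpt; modelled on the mainline 2.6.20 helpers, the read side looks roughly
like:

    /* Sketch: fetch one 64-bit redirection-table entry under ioapic_lock. */
    union entry_union {
        struct { u32 w1, w2; };
        struct IO_APIC_route_entry entry;
    };

    static struct IO_APIC_route_entry sketch_ioapic_read_entry(int apic, int pin)
    {
        union entry_union eu;
        unsigned long flags;

        spin_lock_irqsave(&ioapic_lock, flags);
        eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
        eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
        spin_unlock_irqrestore(&ioapic_lock, flags);
        return eu.entry;
    }
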
 +/*
-+ * function to set the IO-APIC physical IDs based on the
-+ * values stored in the MPC table.
-+ *
-+ * by Matt Domsch <Matt_Domsch at dell.com>  Tue Dec 21 12:25:05 CST 1999
-+ */
-+
-+#ifndef CONFIG_XEN
-+static void __init setup_ioapic_ids_from_mpc (void)
-+{
-+	union IO_APIC_reg_00 reg_00;
-+	int apic;
-+	int i;
-+	unsigned char old_id;
-+	unsigned long flags;
-+
-+	/*
-+	 * Set the IOAPIC ID to the value stored in the MPC table.
-+	 */
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+		/* Read the register 0 value */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_00.raw = io_apic_read(apic, 0);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		
-+		old_id = mp_ioapics[apic].mpc_apicid;
-+
-+
-+		printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
-+
-+
-+		/*
-+		 * We need to adjust the IRQ routing table
-+		 * if the ID changed.
-+		 */
-+		if (old_id != mp_ioapics[apic].mpc_apicid)
-+			for (i = 0; i < mp_irq_entries; i++)
-+				if (mp_irqs[i].mpc_dstapic == old_id)
-+					mp_irqs[i].mpc_dstapic
-+						= mp_ioapics[apic].mpc_apicid;
-+
-+		/*
-+		 * Read the right value from the MPC table and
-+		 * write it into the ID register.
-+	 	 */
-+		apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
-+				mp_ioapics[apic].mpc_apicid);
-+
-+		reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(apic, 0, reg_00.raw);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+		/*
-+		 * Sanity check
-+		 */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_00.raw = io_apic_read(apic, 0);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
-+			printk("could not set ID!\n");
-+		else
-+			apic_printk(APIC_VERBOSE," ok.\n");
-+	}
-+}
-+#else
-+static void __init setup_ioapic_ids_from_mpc(void) { }
-+#endif
-+
-+/*
 + * There is a nasty bug in some older SMP boards, their mptable lies
 + * about the timer IRQ. We do the following to work around the situation:
 + *
@@ -34045,7 +29525,7 @@
 + * an edge even if it isn't on the 8259A...
 + */
 +
-+static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++static unsigned int startup_ioapic_irq(unsigned int irq)
 +{
 +	int was_pending = 0;
 +	unsigned long flags;
@@ -34062,107 +29542,19 @@
 +	return was_pending;
 +}
 +
-+/*
-+ * Once we have recorded IRQ_PENDING already, we can mask the
-+ * interrupt for real. This prevents IRQ storms from unhandled
-+ * devices.
-+ */
-+static void ack_edge_ioapic_irq(unsigned int irq)
-+{
-+	move_irq(irq);
-+	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
-+					== (IRQ_PENDING | IRQ_DISABLED))
-+		mask_IO_APIC_irq(irq);
-+	ack_APIC_irq();
-+}
-+
-+/*
-+ * Level triggered interrupts can just be masked,
-+ * and shutting down and starting up the interrupt
-+ * is the same as enabling and disabling them -- except
-+ * with a startup need to return a "was pending" value.
-+ *
-+ * Level triggered interrupts are special because we
-+ * do not touch any IO-APIC register while handling
-+ * them. We ack the APIC in the end-IRQ handler, not
-+ * in the start-IRQ-handler. Protection against reentrance
-+ * from the same interrupt is still provided, both by the
-+ * generic IRQ layer and by the fact that an unacked local
-+ * APIC does not accept IRQs.
-+ */
-+static unsigned int startup_level_ioapic_irq (unsigned int irq)
-+{
-+	unmask_IO_APIC_irq(irq);
-+
-+	return 0; /* don't check for pending */
-+}
-+
-+static void end_level_ioapic_irq (unsigned int irq)
-+{
-+	move_irq(irq);
-+	ack_APIC_irq();
-+}
-+
-+#ifdef CONFIG_PCI_MSI
-+static unsigned int startup_edge_ioapic_vector(unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	return startup_edge_ioapic_irq(irq);
-+}
-+
-+static void ack_edge_ioapic_vector(unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	move_native_irq(vector);
-+	ack_edge_ioapic_irq(irq);
-+}
-+
-+static unsigned int startup_level_ioapic_vector (unsigned int vector)
++static int ioapic_retrigger_irq(unsigned int irq)
 +{
-+	int irq = vector_to_irq(vector);
-+
-+	return startup_level_ioapic_irq (irq);
-+}
-+
-+static void end_level_ioapic_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	move_native_irq(vector);
-+	end_level_ioapic_irq(irq);
-+}
-+
-+static void mask_IO_APIC_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	mask_IO_APIC_irq(irq);
-+}
-+
-+static void unmask_IO_APIC_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	unmask_IO_APIC_irq(irq);
-+}
-+
-+#ifdef CONFIG_SMP
-+static void set_ioapic_affinity_vector (unsigned int vector,
-+					cpumask_t cpu_mask)
-+{
-+	int irq = vector_to_irq(vector);
++	cpumask_t mask;
++	unsigned vector;
++	unsigned long flags;
 +
-+	set_native_irq_info(vector, cpu_mask);
-+	set_ioapic_affinity_irq(irq, cpu_mask);
-+}
-+#endif // CONFIG_SMP
-+#endif // CONFIG_PCI_MSI
++	spin_lock_irqsave(&vector_lock, flags);
++	vector = irq_vector[irq];
++	cpus_clear(mask);
++	cpu_set(first_cpu(irq_domain[irq]), mask);
 +
-+static int ioapic_retrigger(unsigned int irq)
-+{
-+	send_IPI_self(IO_APIC_VECTOR(irq));
++	send_IPI_mask(mask, vector);
++	spin_unlock_irqrestore(&vector_lock, flags);
 +
 +	return 1;
 +}
@@ -34176,32 +29568,47 @@
 + * races.
 + */
 +
-+static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
-+	.typename = "IO-APIC-edge",
-+	.startup 	= startup_edge_ioapic,
-+	.shutdown 	= shutdown_edge_ioapic,
-+	.enable 	= enable_edge_ioapic,
-+	.disable 	= disable_edge_ioapic,
-+	.ack 		= ack_edge_ioapic,
-+	.end 		= end_edge_ioapic,
-+#ifdef CONFIG_SMP
-+	.set_affinity = set_ioapic_affinity,
++static void ack_apic_edge(unsigned int irq)
++{
++	move_native_irq(irq);
++	ack_APIC_irq();
++}
++
++static void ack_apic_level(unsigned int irq)
++{
++	int do_unmask_irq = 0;
++
++#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
++	/* If we are moving the irq we need to mask it */
++	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
++		do_unmask_irq = 1;
++		mask_IO_APIC_irq(irq);
++	}
 +#endif
-+	.retrigger	= ioapic_retrigger,
-+};
 +
-+static struct hw_interrupt_type ioapic_level_type __read_mostly = {
-+	.typename = "IO-APIC-level",
-+	.startup 	= startup_level_ioapic,
-+	.shutdown 	= shutdown_level_ioapic,
-+	.enable 	= enable_level_ioapic,
-+	.disable 	= disable_level_ioapic,
-+	.ack 		= mask_and_ack_level_ioapic,
-+	.end 		= end_level_ioapic,
++	/*
++	 * We must acknowledge the irq before we move it or the acknowledgement
++	 * will not propagate properly.
++	 */
++	ack_APIC_irq();
++
++	/* Now we can move and re-enable the irq */
++	move_masked_irq(irq);
++	if (unlikely(do_unmask_irq))
++		unmask_IO_APIC_irq(irq);
++}
++
++static struct irq_chip ioapic_chip __read_mostly = {
++	.name 		= "IO-APIC",
++	.startup 	= startup_ioapic_irq,
++	.mask	 	= mask_IO_APIC_irq,
++	.unmask	 	= unmask_IO_APIC_irq,
++	.ack 		= ack_apic_edge,
++	.eoi 		= ack_apic_level,
 +#ifdef CONFIG_SMP
-+	.set_affinity = set_ioapic_affinity,
++	.set_affinity 	= set_ioapic_affinity_irq,
 +#endif
-+	.retrigger	= ioapic_retrigger,
++	.retrigger	= ioapic_retrigger_irq,
 +};
 +#endif /* !CONFIG_XEN */
 +
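
A single irq_chip now covers both trigger types because the flow handler
picked in ioapic_register_intr() decides which callback runs. Grossly
simplified from the 2.6.20 genirq core (kernel/irq/chip.c), for orientation
only:

    /* Sketch: why .ack serves edge and .eoi serves level interrupts. */
    void sketch_handle_edge_irq(unsigned int irq, struct irq_desc *desc)
    {
        desc->chip->ack(irq);       /* ack_apic_edge(), before the action */
        /* ... run the action handlers ... */
    }

    void sketch_handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
    {
        /* ... run the action handlers ... */
        desc->chip->eoi(irq);       /* ack_apic_level(), after the action */
    }
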
@@ -34222,12 +29629,7 @@
 +	 */
 +	for (irq = 0; irq < NR_IRQS ; irq++) {
 +		int tmp = irq;
-+		if (use_pci_vector()) {
-+			if (!platform_legacy_irq(tmp))
-+				if ((tmp = vector_to_irq(tmp)) == -1)
-+					continue;
-+		}
-+		if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++		if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
 +			/*
 +			 * Hmm.. We don't have an entry for this,
 +			 * so default to an old-fashioned 8259
@@ -34238,7 +29640,7 @@
 +#ifndef CONFIG_XEN
 +			else
 +				/* Strange. Oh, well.. */
-+				irq_desc[irq].chip = &no_irq_type;
++				irq_desc[irq].chip = &no_irq_chip;
 +#endif
 +		}
 +	}
@@ -34359,8 +29761,6 @@
 +	spin_unlock_irqrestore(&ioapic_lock, flags);
 +}
 +
-+int timer_uses_ioapic_pin_0;
-+
 +/*
 + * This code may look a bit paranoid, but it's supposed to cooperate with
 + * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
@@ -34373,13 +29773,13 @@
 +{
 +	int apic1, pin1, apic2, pin2;
 +	int vector;
++	cpumask_t mask;
 +
 +	/*
 +	 * get/set the timer IRQ vector:
 +	 */
 +	disable_8259A_irq(0);
-+	vector = assign_irq_vector(0);
-+	set_intr_gate(vector, interrupt[0]);
++	vector = assign_irq_vector(0, TARGET_CPUS, &mask);
 +
 +	/*
 +	 * Subtle, code in do_timer_interrupt() expects an AEOI
@@ -34398,9 +29798,6 @@
 +	pin2  = ioapic_i8259.pin;
 +	apic2 = ioapic_i8259.apic;
 +
-+	if (pin1 == 0)
-+		timer_uses_ioapic_pin_0 = 1;
-+
 +	apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
 +		vector, apic1, pin1, apic2, pin2);
 +
@@ -34484,7 +29881,6 @@
 +	panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
 +}
 +#else
-+int timer_uses_ioapic_pin_0;
 +#define check_timer() ((void)0)
 +#endif /* !CONFIG_XEN */
 +
@@ -34515,11 +29911,6 @@
 +
 +	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
 +
-+	/*
-+	 * Set up the IO-APIC IRQ routing table.
-+	 */
-+	if (!acpi_ioapic)
-+		setup_ioapic_ids_from_mpc();
 +#ifndef CONFIG_XEN
 +	sync_Arb_IDs();
 +#endif /* !CONFIG_XEN */
@@ -34540,17 +29931,12 @@
 +{
 +	struct IO_APIC_route_entry *entry;
 +	struct sysfs_ioapic_data *data;
-+	unsigned long flags;
 +	int i;
 +
 +	data = container_of(dev, struct sysfs_ioapic_data, dev);
 +	entry = data->entry;
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+		*(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
-+		*(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
-+	}
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ )
++		*entry = ioapic_read_entry(dev->id, i);
 +
 +	return 0;
 +}
@@ -34572,11 +29958,9 @@
 +		reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
 +		io_apic_write(dev->id, 0, reg_00.raw);
 +	}
-+	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+		io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
-+		io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
-+	}
 +	spin_unlock_irqrestore(&ioapic_lock, flags);
++	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
++		ioapic_write_entry(dev->id, i, entry[i]);
 +
 +	return 0;
 +}
@@ -34622,26 +30006,253 @@
 +
 +device_initcall(ioapic_init_sysfs);
 +
-+/* --------------------------------------------------------------------------
-+                          ACPI-based IOAPIC Configuration
-+   -------------------------------------------------------------------------- */
++/*
++ * Dynamic irq allocation and deallocation
++ */
++int create_irq(void)
++{
++	/* Allocate an unused irq */
++	int irq;
++	int new;
++	int vector = 0;
++	unsigned long flags;
++	cpumask_t mask;
 +
-+#ifdef CONFIG_ACPI
++	irq = -ENOSPC;
++	spin_lock_irqsave(&vector_lock, flags);
++	for (new = (NR_IRQS - 1); new >= 0; new--) {
++		if (platform_legacy_irq(new))
++			continue;
++		if (irq_vector[new] != 0)
++			continue;
++		vector = __assign_irq_vector(new, TARGET_CPUS, &mask);
++		if (likely(vector > 0))
++			irq = new;
++		break;
++	}
++	spin_unlock_irqrestore(&vector_lock, flags);
 +
-+#define IO_APIC_MAX_ID		0xFE
++	if (irq >= 0) {
++		dynamic_irq_init(irq);
++	}
++	return irq;
++}
 +
-+int __init io_apic_get_version (int ioapic)
++void destroy_irq(unsigned int irq)
 +{
-+	union IO_APIC_reg_01	reg_01;
 +	unsigned long flags;
 +
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_01.raw = io_apic_read(ioapic, 1);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
++	dynamic_irq_cleanup(irq);
 +
-+	return reg_01.bits.version;
++	spin_lock_irqsave(&vector_lock, flags);
++	__clear_irq_vector(irq);
++	spin_unlock_irqrestore(&vector_lock, flags);
++}
++
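
For a sense of how this pair is consumed: dynamic interrupt sources (MSI
below, HT further down) allocate an irq, wire up a handler, and tear both
down in reverse order. A hypothetical caller -- the names and the 2.6.20-era
two-argument handler signature are assumptions:

    #include <linux/interrupt.h>

    static irqreturn_t sketch_handler(int irq, void *dev_id)
    {
        return IRQ_HANDLED;
    }

    static int sketch_attach(void *dev)
    {
        int irq = create_irq();     /* picks a free irq and a vector */

        if (irq < 0)
            return irq;             /* -ENOSPC: nothing left to hand out */
        if (request_irq(irq, sketch_handler, 0, "sketch", dev)) {
            destroy_irq(irq);       /* give the vector back on failure */
            return -EBUSY;
        }
        return irq;    /* teardown: free_irq(irq, dev); destroy_irq(irq); */
    }
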
++/*
++ * MSI message composition
++ */
++#ifndef CONFIG_XEN
++#ifdef CONFIG_PCI_MSI
++static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
++{
++	int vector;
++	unsigned dest;
++	cpumask_t tmp;
++
++	vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
++	if (vector >= 0) {
++		dest = cpu_mask_to_apicid(tmp);
++
++		msg->address_hi = MSI_ADDR_BASE_HI;
++		msg->address_lo =
++			MSI_ADDR_BASE_LO |
++			((INT_DEST_MODE == 0) ?
++				MSI_ADDR_DEST_MODE_PHYSICAL:
++				MSI_ADDR_DEST_MODE_LOGICAL) |
++			((INT_DELIVERY_MODE != dest_LowestPrio) ?
++				MSI_ADDR_REDIRECTION_CPU:
++				MSI_ADDR_REDIRECTION_LOWPRI) |
++			MSI_ADDR_DEST_ID(dest);
++
++		msg->data =
++			MSI_DATA_TRIGGER_EDGE |
++			MSI_DATA_LEVEL_ASSERT |
++			((INT_DELIVERY_MODE != dest_LowestPrio) ?
++				MSI_DATA_DELIVERY_FIXED:
++				MSI_DATA_DELIVERY_LOWPRI) |
++			MSI_DATA_VECTOR(vector);
++	}
++	return vector;
++}
++
++#ifdef CONFIG_SMP
++static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++	struct msi_msg msg;
++	unsigned int dest;
++	cpumask_t tmp;
++	int vector;
++
++	cpus_and(tmp, mask, cpu_online_map);
++	if (cpus_empty(tmp))
++		tmp = TARGET_CPUS;
++
++	cpus_and(mask, tmp, CPU_MASK_ALL);
++
++	vector = assign_irq_vector(irq, mask, &tmp);
++	if (vector < 0)
++		return;
++
++	dest = cpu_mask_to_apicid(tmp);
++
++	read_msi_msg(irq, &msg);
++
++	msg.data &= ~MSI_DATA_VECTOR_MASK;
++	msg.data |= MSI_DATA_VECTOR(vector);
++	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
++	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
++
++	write_msi_msg(irq, &msg);
++	set_native_irq_info(irq, mask);
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
++ * which implement the MSI or MSI-X Capability Structure.
++ */
++static struct irq_chip msi_chip = {
++	.name		= "PCI-MSI",
++	.unmask		= unmask_msi_irq,
++	.mask		= mask_msi_irq,
++	.ack		= ack_apic_edge,
++#ifdef CONFIG_SMP
++	.set_affinity	= set_msi_irq_affinity,
++#endif
++	.retrigger	= ioapic_retrigger_irq,
++};
++
++int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
++{
++	struct msi_msg msg;
++	int ret;
++	ret = msi_compose_msg(dev, irq, &msg);
++	if (ret < 0)
++		return ret;
++
++	write_msi_msg(irq, &msg);
++
++	set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
++
++	return 0;
++}
++
++void arch_teardown_msi_irq(unsigned int irq)
++{
++	return;
++}
++
++#endif /* CONFIG_PCI_MSI */
++#endif /* !CONFIG_XEN */
++/*
++ * Hypertransport interrupt support
++ */
++#ifdef CONFIG_HT_IRQ
++
++#ifdef CONFIG_SMP
++
++static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
++{
++	struct ht_irq_msg msg;
++	fetch_ht_irq_msg(irq, &msg);
++
++	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
++	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
++
++	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
++	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
++
++	write_ht_irq_msg(irq, &msg);
++}
++
++static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++	unsigned int dest;
++	cpumask_t tmp;
++	int vector;
++
++	cpus_and(tmp, mask, cpu_online_map);
++	if (cpus_empty(tmp))
++		tmp = TARGET_CPUS;
++
++	cpus_and(mask, tmp, CPU_MASK_ALL);
++
++	vector = assign_irq_vector(irq, mask, &tmp);
++	if (vector < 0)
++		return;
++
++	dest = cpu_mask_to_apicid(tmp);
++
++	target_ht_irq(irq, dest, vector);
++	set_native_irq_info(irq, mask);
++}
++#endif
++
++static struct irq_chip ht_irq_chip = {
++	.name		= "PCI-HT",
++	.mask		= mask_ht_irq,
++	.unmask		= unmask_ht_irq,
++	.ack		= ack_apic_edge,
++#ifdef CONFIG_SMP
++	.set_affinity	= set_ht_irq_affinity,
++#endif
++	.retrigger	= ioapic_retrigger_irq,
++};
++
++int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
++{
++	int vector;
++	cpumask_t tmp;
++
++	vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
++	if (vector >= 0) {
++		struct ht_irq_msg msg;
++		unsigned dest;
++
++		dest = cpu_mask_to_apicid(tmp);
++
++		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
++
++		msg.address_lo =
++			HT_IRQ_LOW_BASE |
++			HT_IRQ_LOW_DEST_ID(dest) |
++			HT_IRQ_LOW_VECTOR(vector) |
++			((INT_DEST_MODE == 0) ?
++				HT_IRQ_LOW_DM_PHYSICAL :
++				HT_IRQ_LOW_DM_LOGICAL) |
++			HT_IRQ_LOW_RQEOI_EDGE |
++			((INT_DELIVERY_MODE != dest_LowestPrio) ?
++				HT_IRQ_LOW_MT_FIXED :
++				HT_IRQ_LOW_MT_ARBITRATED) |
++			HT_IRQ_LOW_IRQ_MASKED;
++
++		write_ht_irq_msg(irq, &msg);
++
++		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
++					      handle_edge_irq, "edge");
++	}
++	return vector;
 +}
++#endif /* CONFIG_HT_IRQ */
++
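
Drivers do not call arch_setup_ht_irq() themselves; the generic layer in
drivers/pci/htirq.c (present since 2.6.19) wraps it. A hypothetical consumer:

    #include <linux/htirq.h>
    #include <linux/interrupt.h>

    /* Hypothetical: attach a handler to a device's first HT irq message. */
    static int sketch_ht_attach(struct pci_dev *dev, irq_handler_t handler)
    {
        int irq = ht_create_irq(dev, 0);    /* idx 0 = first message */

        if (irq < 0)
            return irq;
        if (request_irq(irq, handler, 0, "sketch-ht", dev)) {
            ht_destroy_irq(irq);
            return -EBUSY;
        }
        return irq;
    }
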
++/* --------------------------------------------------------------------------
++                          ACPI-based IOAPIC Configuration
++   -------------------------------------------------------------------------- */
 +
++#ifdef CONFIG_ACPI
++
++#define IO_APIC_MAX_ID		0xFE
 +
 +int __init io_apic_get_redir_entries (int ioapic)
 +{
@@ -34656,10 +30267,12 @@
 +}
 +
 +
-+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
 +{
 +	struct IO_APIC_route_entry entry;
 +	unsigned long flags;
++	int vector;
++	cpumask_t mask;
 +
 +	if (!IO_APIC_IRQ(irq)) {
 +		apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
@@ -34668,6 +30281,17 @@
 +	}
 +
 +	/*
++	 * IRQs < 16 are already in the irq_2_pin[] map
++	 */
++	if (irq >= 16)
++		add_pin_to_irq(irq, ioapic, pin);
++
++
++	vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
++	if (vector < 0)
++		return vector;
++
++	/*
 +	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
 +	 * Note that we mask (disable) IRQs now -- these get enabled when the
 +	 * corresponding device driver registers for this IRQ.
@@ -34677,34 +30301,26 @@
 +
 +	entry.delivery_mode = INT_DELIVERY_MODE;
 +	entry.dest_mode = INT_DEST_MODE;
-+	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+	entry.trigger = edge_level;
-+	entry.polarity = active_high_low;
++	entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
++	entry.trigger = triggering;
++	entry.polarity = polarity;
 +	entry.mask = 1;					 /* Disabled (masked) */
-+
-+	irq = gsi_irq_sharing(irq);
-+	/*
-+	 * IRQs < 16 are already in the irq_2_pin[] map
-+	 */
-+	if (irq >= 16)
-+		add_pin_to_irq(irq, ioapic, pin);
-+
-+	entry.vector = assign_irq_vector(irq);
++	entry.vector = vector & 0xff;
 +
 +	apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
 +		"IRQ %d Mode:%i Active:%i)\n", ioapic, 
 +	       mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
-+	       edge_level, active_high_low);
++	       triggering, polarity);
 +
-+	ioapic_register_intr(irq, entry.vector, edge_level);
++	ioapic_register_intr(irq, entry.vector, triggering);
 +
 +	if (!ioapic && (irq < 16))
 +		disable_8259A_irq(irq);
 +
++	ioapic_write_entry(ioapic, pin, entry);
++
 +	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
-+	io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
-+	set_native_irq_info(use_pci_vector() ?  entry.vector : irq, TARGET_CPUS);
++	set_native_irq_info(irq, TARGET_CPUS);
 +	spin_unlock_irqrestore(&ioapic_lock, flags);
 +
 +	return 0;
@@ -34733,16 +30349,24 @@
 +			if (irq_entry == -1)
 +				continue;
 +			irq = pin_2_irq(irq_entry, ioapic, pin);
-+			set_ioapic_affinity_irq(irq, TARGET_CPUS);
++
++			/* setup_IO_APIC_irqs could fail to get a vector for some
++			 * devices when there are too many of them, because at
++			 * that time only the boot cpu is online.
++			 */
++			if (!irq_vector[irq])
++				setup_IO_APIC_irq(ioapic, pin, irq_entry, irq);
++			else
++				set_ioapic_affinity_irq(irq, TARGET_CPUS);
 +		}
 +
 +	}
 +}
 +#endif
 +#endif /* !CONFIG_XEN */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/ioport-xen.c linux-2.6.18-xen/arch/x86_64/kernel/ioport-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/ioport-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/ioport-xen.c	2006-11-19 14:26:33.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/ioport-xen.c b/arch/x86_64/kernel/ioport-xen.c
+--- a/arch/x86_64/kernel/ioport-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/ioport-xen.c	2007-03-14 10:55:21.000000000 +0100
 @@ -0,0 +1,99 @@
 +/*
 + *	linux/arch/x86_64/kernel/ioport.c
@@ -34769,13 +30393,12 @@
 +static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
 +{
 +	int i;
-+
-+	if (new_value)
-+		for (i = base; i < base + extent; i++)
-+			__set_bit(i, bitmap);
-+	else
-+		for (i = base; i < base + extent; i++)
-+			clear_bit(i, bitmap);
++	if (new_value)
++		for (i = base; i < base + extent; i++)
++			__set_bit(i, bitmap);
++	else
++		for (i = base; i < base + extent; i++)
++			clear_bit(i, bitmap);
 +}
 +
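
set_bitmap() above is the working end of sys_ioperm(): it opens or closes a
window of ports in the per-task I/O bitmap, which the PHYSDEVOP_set_iobitmap
call below hands to Xen for enforcement. From user space the same machinery is
reached through the glibc wrapper -- a hypothetical consumer:

    /* User-space sketch: unmask the three parallel-port registers. */
    #include <sys/io.h>
    #include <stdio.h>

    int main(void)
    {
        if (ioperm(0x378, 3, 1)) {    /* base port, extent, turn_on */
            perror("ioperm");         /* needs CAP_SYS_RAWIO */
            return 1;
        }
        outb(0xff, 0x378);            /* drive the data lines */
        return 0;
    }
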
 +/*
@@ -34804,6 +30427,7 @@
 +
 +		memset(bitmap, 0xff, IO_BITMAP_BYTES);
 +		t->io_bitmap_ptr = bitmap;
++		set_thread_flag(TIF_IO_BITMAP);
 +
 +		set_iobitmap.bitmap   = (char *)bitmap;
 +		set_iobitmap.nr_ports = IO_BITMAP_BITS;
@@ -34822,337 +30446,46 @@
 + *
 + */
 +
-+asmlinkage long sys_iopl(unsigned int new_iopl, struct pt_regs *regs)
++asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs)
 +{
-+	unsigned int old_iopl = current->thread.iopl;
++	unsigned int old = current->thread.iopl;
 +	struct physdev_set_iopl set_iopl;
 +
-+	if (new_iopl > 3)
++	if (level > 3)
 +		return -EINVAL;
-+
-+	/* Need "raw I/O" privileges for direct port access. */
-+	if ((new_iopl > old_iopl) && !capable(CAP_SYS_RAWIO))
-+		return -EPERM;
-+
++	/* Trying to gain more privileges? */
++	if (level > old) {
++		if (!capable(CAP_SYS_RAWIO))
++			return -EPERM;
++	}
 +	/* Change our version of the privilege levels. */
-+	current->thread.iopl = new_iopl;
++	current->thread.iopl = level;
 +
 +	/* Force the change at ring 0. */
-+	set_iopl.iopl = (new_iopl == 0) ? 1 : new_iopl;
++	set_iopl.iopl = (level == 0) ? 1 : level;
 +	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
 +
 +	return 0;
 +}
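
The iopl() counterpart raises the privilege level for all 65536 ports at once
instead of opening individual ranges -- again a hypothetical user-space
sketch:

    #include <sys/io.h>
    #include <stdio.h>

    int main(void)
    {
        if (iopl(3)) {        /* raising the level needs CAP_SYS_RAWIO */
            perror("iopl");
            return 1;
        }
        outb(0x00, 0x80);     /* harmless write to the POST port */
        return 0;
    }
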
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/irqflags-xen.c linux-2.6.18-xen/arch/x86_64/kernel/irqflags-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/irqflags-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/irqflags-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,100 @@
-+#include <linux/module.h>
-+#include <linux/smp.h>
-+#include <asm/irqflags.h>
-+#include <asm/hypervisor.h>
-+
-+/* 
-+ * The use of 'barrier' in the following reflects their use as local-lock
-+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
-+ * critical operations are executed. All critical operations must complete
-+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
-+ * includes these barriers, for example.
-+ */
-+
-+unsigned long __raw_local_save_flags(void)
-+{
-+	struct vcpu_info *_vcpu;
-+	unsigned long flags;
-+
-+	preempt_disable();
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
-+	flags = _vcpu->evtchn_upcall_mask;
-+	preempt_enable();
-+
-+	return flags;
-+}
-+EXPORT_SYMBOL(__raw_local_save_flags);
-+
-+void raw_local_irq_restore(unsigned long flags)
-+{
-+	struct vcpu_info *_vcpu;
-+	preempt_disable();
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
-+	if ((_vcpu->evtchn_upcall_mask = flags) == 0) {
-+		barrier(); /* unmask then check (avoid races) */
-+		if ( unlikely(_vcpu->evtchn_upcall_pending) )
-+			force_evtchn_callback();
-+		preempt_enable();
-+	} else
-+		preempt_enable_no_resched();
-+}
-+EXPORT_SYMBOL(raw_local_irq_restore);
-+
-+void raw_local_irq_disable(void)
-+{
-+	struct vcpu_info *_vcpu;
-+
-+	preempt_disable();
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
-+	_vcpu->evtchn_upcall_mask = 1;
-+	preempt_enable_no_resched();
-+}
-+EXPORT_SYMBOL(raw_local_irq_disable);
-+
-+void raw_local_irq_enable(void)
-+{
-+	struct vcpu_info *_vcpu;
-+
-+	preempt_disable();
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
-+	_vcpu->evtchn_upcall_mask = 0;
-+	barrier(); /* unmask then check (avoid races) */
-+	if ( unlikely(_vcpu->evtchn_upcall_pending) )
-+		force_evtchn_callback();
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL(raw_local_irq_enable);
-+
-+/*
-+ * For spinlocks, etc.:
-+ */
-+
-+unsigned long __raw_local_irq_save(void)
-+{
-+	struct vcpu_info *_vcpu;
-+	unsigned long flags;
-+
-+	preempt_disable();
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
-+	flags = _vcpu->evtchn_upcall_mask;
-+	_vcpu->evtchn_upcall_mask = 1;
-+	preempt_enable_no_resched();
-+
-+	return flags;
-+}
-+EXPORT_SYMBOL(__raw_local_irq_save);
-+
-+/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
-+int raw_irqs_disabled(void)
-+{
-+	struct vcpu_info *_vcpu;
-+	int disabled;
-+
-+	preempt_disable();
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
-+	disabled = (_vcpu->evtchn_upcall_mask != 0);
-+	preempt_enable_no_resched();
-+
-+	return disabled;
-+}
-+EXPORT_SYMBOL(raw_irqs_disabled);
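
These Xen irqflags primitives follow the usual pairing contract: save-and-mask
on entry, restore on exit, with any event-channel upcall that arrived in
between replayed once the mask clears. In use they look like any other
irqflags implementation -- a minimal sketch:

    /* Sketch: a critical section built on the primitives above. */
    static void sketch_critical_section(void)
    {
        unsigned long flags;

        flags = __raw_local_irq_save();  /* set evtchn_upcall_mask, keep old */
        /* ... no event-channel upcalls are delivered in here ... */
        raw_local_irq_restore(flags);    /* unmask; fire any pending upcall */
    }
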
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/irq-xen.c linux-2.6.18-xen/arch/x86_64/kernel/irq-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/irq-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/irq-xen.c	2006-11-19 14:26:33.000000000 +0100
-@@ -0,0 +1,198 @@
-+/*
-+ *	linux/arch/x86_64/kernel/irq.c
-+ *
-+ *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
-+ *
-+ * This file contains the lowest level x86_64-specific interrupt
-+ * entry and irq statistics code. All the remaining irq logic is
-+ * done by the generic kernel/irq/ code and in the
-+ * x86_64-specific irq controller code. (e.g. i8259.c and
-+ * io_apic.c.)
-+ */
-+
-+#include <linux/kernel_stat.h>
-+#include <linux/interrupt.h>
-+#include <linux/seq_file.h>
-+#include <linux/module.h>
-+#include <linux/delay.h>
-+#include <asm/uaccess.h>
-+#include <asm/io_apic.h>
-+#include <asm/idle.h>
-+
-+atomic_t irq_err_count;
-+#ifdef CONFIG_X86_IO_APIC
-+#ifdef APIC_MISMATCH_DEBUG
-+atomic_t irq_mis_count;
-+#endif
-+#endif
-+
-+#ifdef CONFIG_DEBUG_STACKOVERFLOW
-+/*
-+ * Probabilistic stack overflow check:
-+ *
-+ * Only check the stack in process context, because everything else
-+ * runs on the big interrupt stacks. Checking reliably is too expensive,
-+ * so we just check from interrupts.
-+ */
-+static inline void stack_overflow_check(struct pt_regs *regs)
-+{
-+	u64 curbase = (u64) current->thread_info;
-+	static unsigned long warned = -60*HZ;
-+
-+	if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
-+	    regs->rsp <  curbase + sizeof(struct thread_info) + 128 &&
-+	    time_after(jiffies, warned + 60*HZ)) {
-+		printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
-+		       current->comm, curbase, regs->rsp);
-+		show_stack(NULL,NULL);
-+		warned = jiffies;
-+	}
-+}
-+#endif
-+
-+/*
-+ * Generic, controller-independent functions:
-+ */
-+
-+int show_interrupts(struct seq_file *p, void *v)
-+{
-+	int i = *(loff_t *) v, j;
-+	struct irqaction * action;
-+	unsigned long flags;
-+
-+	if (i == 0) {
-+		seq_printf(p, "           ");
-+		for_each_online_cpu(j)
-+			seq_printf(p, "CPU%-8d       ",j);
-+		seq_putc(p, '\n');
-+	}
-+
-+	if (i < NR_IRQS) {
-+		spin_lock_irqsave(&irq_desc[i].lock, flags);
-+		action = irq_desc[i].action;
-+		if (!action) 
-+			goto skip;
-+		seq_printf(p, "%3d: ",i);
-+#ifndef CONFIG_SMP
-+		seq_printf(p, "%10u ", kstat_irqs(i));
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
+--- a/arch/x86_64/kernel/irq.c	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/kernel/irq.c	2007-03-14 10:55:21.000000000 +0100
+@@ -113,7 +113,11 @@
+ 
+ 	exit_idle();
+ 	irq_enter();
++#ifdef CONFIG_XEN
++	irq = vector;
 +#else
-+		for_each_online_cpu(j)
-+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-+#endif
-+		seq_printf(p, " %14s", irq_desc[i].chip->typename);
-+
-+		seq_printf(p, "  %s", action->name);
-+		for (action=action->next; action; action = action->next)
-+			seq_printf(p, ", %s", action->name);
-+		seq_putc(p, '\n');
-+skip:
-+		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-+	} else if (i == NR_IRQS) {
-+		seq_printf(p, "NMI: ");
-+		for_each_online_cpu(j)
-+			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
-+		seq_putc(p, '\n');
-+#ifdef CONFIG_X86_LOCAL_APIC
-+		seq_printf(p, "LOC: ");
-+		for_each_online_cpu(j)
-+			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
-+		seq_putc(p, '\n');
-+#endif
-+		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-+#ifdef CONFIG_X86_IO_APIC
-+#ifdef APIC_MISMATCH_DEBUG
-+		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-+#endif
-+#endif
-+	}
-+	return 0;
-+}
-+
-+/*
-+ * do_IRQ handles all normal device IRQ's (the special
-+ * SMP cross-CPU interrupts have their own specific
-+ * handlers).
-+ */
-+asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
-+{	
-+	/* high bit used in ret_from_ code  */
-+	unsigned irq = ~regs->orig_rax;
-+
-+	if (unlikely(irq >= NR_IRQS)) {
-+		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-+					__FUNCTION__, irq);
-+		BUG();
-+	}
-+
-+	exit_idle();
-+	irq_enter();
-+
-+#ifdef CONFIG_DEBUG_STACKOVERFLOW
-+	stack_overflow_check(regs);
-+#endif
-+	__do_IRQ(irq, regs);
-+	irq_exit();
-+
-+	return 1;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+void fixup_irqs(cpumask_t map)
-+{
-+	unsigned int irq;
-+	static int warned;
-+
-+	for (irq = 0; irq < NR_IRQS; irq++) {
-+		cpumask_t mask;
-+		if (irq == 2)
-+			continue;
-+
-+		cpus_and(mask, irq_desc[irq].affinity, map);
-+		if (any_online_cpu(mask) == NR_CPUS) {
-+			printk("Breaking affinity for irq %i\n", irq);
-+			mask = map;
-+		}
-+		if (irq_desc[irq].chip->set_affinity)
-+			irq_desc[irq].chip->set_affinity(irq, mask);
-+		else if (irq_desc[irq].action && !(warned++))
-+			printk("Cannot set affinity for irq %i\n", irq);
-+	}
-+
-+	/* That doesn't seem sufficient.  Give it 1ms. */
-+	local_irq_enable();
-+	mdelay(1);
-+	local_irq_disable();
-+}
-+#endif
-+
-+extern void call_softirq(void);
-+
-+asmlinkage void do_softirq(void)
-+{
-+ 	__u32 pending;
-+ 	unsigned long flags;
-+
-+ 	if (in_interrupt())
-+ 		return;
-+
-+ 	local_irq_save(flags);
-+ 	pending = local_softirq_pending();
-+ 	/* Switch to interrupt stack */
-+ 	if (pending) {
-+		call_softirq();
-+		WARN_ON_ONCE(softirq_count());
-+	}
-+ 	local_irq_restore(flags);
-+}
-+EXPORT_SYMBOL(do_softirq);
-+
-+#ifndef CONFIG_X86_LOCAL_APIC
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * each architecture has to answer this themselves.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+        printk("unexpected IRQ trap at vector %02x\n", irq);
-+}
+ 	irq = __get_cpu_var(vector_irq)[vector];
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/ldt-xen.c linux-2.6.18-xen/arch/x86_64/kernel/ldt-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/ldt-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/ldt-xen.c	2006-11-19 14:26:33.000000000 +0100
-@@ -0,0 +1,282 @@
+ 
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+ 	stack_overflow_check(regs);
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/ldt-xen.c b/arch/x86_64/kernel/ldt-xen.c
+--- a/arch/x86_64/kernel/ldt-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/ldt-xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,283 @@
 +/*
 + * linux/arch/x86_64/kernel/ldt.c
 + *
@@ -35219,6 +30552,7 @@
 +		cpumask_t mask;
 +
 +		preempt_disable();
++		mask = cpumask_of_cpu(smp_processor_id());
 +#endif
 +		make_pages_readonly(
 +			pc->ldt,
@@ -35226,7 +30560,6 @@
 +			XENFEAT_writable_descriptor_tables);
 +		load_LDT(pc);
 +#ifdef CONFIG_SMP
-+		mask = cpumask_of_cpu(smp_processor_id());
 +		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
 +			smp_call_function(flush_ldt, NULL, 1, 1);
 +		preempt_enable();
@@ -35269,6 +30602,7 @@
 +
 +	memset(&mm->context, 0, sizeof(mm->context));
 +	init_MUTEX(&mm->context.sem);
++	mm->context.size = 0;
 +	old_mm = current->mm;
 +	if (old_mm && old_mm->context.size > 0) {
 +		down(&old_mm->context.sem);
@@ -35435,15 +30769,15 @@
 +	}
 +	return ret;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/Makefile linux-2.6.18-xen/arch/x86_64/kernel/Makefile
---- linux-2.6.18.3/arch/x86_64/kernel/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/x86_64/kernel/Makefile	2006-12-05 18:42:36.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
+--- a/arch/x86_64/kernel/Makefile	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/kernel/Makefile	2007-03-14 10:55:21.000000000 +0100
 @@ -21,11 +21,13 @@
  obj-$(CONFIG_X86_CPUID)		+= cpuid.o
  obj-$(CONFIG_SMP)		+= smp.o smpboot.o trampoline.o
- obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o  nmi.o
+ obj-y				+= apic.o  nmi.o
 +obj-$(CONFIG_X86_XEN_GENAPIC)	+= genapic.o genapic_xen.o
- obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o mpparse.o \
+ obj-y				+= io_apic.o mpparse.o \
  		genapic.o genapic_cluster.o genapic_flat.o
  obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o crash.o
  obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
@@ -35453,4503 +30787,4528 @@
  obj-$(CONFIG_SOFTWARE_SUSPEND)	+= suspend_asm.o
  obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
  obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
-@@ -55,3 +57,18 @@
+@@ -56,3 +58,17 @@
+ i8237-y				+= ../../i386/kernel/i8237.o
  msr-$(subst m,y,$(CONFIG_X86_MSR))  += ../../i386/kernel/msr.o
  alternative-y			+= ../../i386/kernel/alternative.o
- 
++
 +ifdef CONFIG_XEN
 +obj-y				+= irqflags.o
-+time-y				+= ../../i386/kernel/time-xen.o
++irqflags-y			+= ../../i386/mach-xen/irqflags.o
 +pci-dma-y			+= ../../i386/kernel/pci-dma-xen.o
 +microcode-$(subst m,y,$(CONFIG_MICROCODE))  := ../../i386/kernel/microcode-xen.o
-+quirks-y			:= ../../i386/kernel/quirks-xen.o
++n-obj-xen := i8259.o reboot.o i8237.o smpboot.o trampoline.o
++
++include $(srctree)/scripts/Makefile.xen
++
++obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
++obj-y := $(call cherrypickxen, $(obj-y))
++extra-y := $(call cherrypickxen, $(extra-y))
++endif
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
+--- a/arch/x86_64/kernel/mpparse.c	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/kernel/mpparse.c	2007-03-14 10:55:21.000000000 +0100
+@@ -90,6 +90,7 @@
+ 
+ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
+ {
++#ifndef CONFIG_XEN
+ 	int cpu;
+ 	cpumask_t tmp_map;
+ 	char *bootup_cpu = "";
+@@ -110,8 +111,9 @@
+ 			" Processor ignored.\n", NR_CPUS);
+ 		return;
+ 	}
+-
++#endif /* !CONFIG_XEN */
+ 	num_processors++;
++#ifndef CONFIG_XEN
+ 	cpus_complement(tmp_map, cpu_present_map);
+ 	cpu = first_cpu(tmp_map);
+ 
+@@ -129,6 +131,7 @@
+ 
+ 	cpu_set(cpu, cpu_possible_map);
+ 	cpu_set(cpu, cpu_present_map);
++#endif /* !CONFIG_XEN */
+ }
+ 
+ static void __init MP_bus_info (struct mpc_config_bus *m)
+@@ -488,7 +491,11 @@
+ 		 * Read the physical hardware table.  Anything here will
+ 		 * override the defaults.
+ 		 */
++#ifdef CONFIG_XEN
++ 		if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++#else
+ 		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
++#endif
+ 			smp_found_config = 0;
+ 			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
+ 			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
+@@ -524,7 +531,12 @@
+ static int __init smp_scan_config (unsigned long base, unsigned long length)
+ {
+ 	extern void __bad_mpf_size(void); 
++#ifdef CONFIG_XEN
++	unsigned int *bp = isa_bus_to_virt(base);
++#else
+ 	unsigned int *bp = phys_to_virt(base);
++#endif
++
+ 	struct intel_mp_floating *mpf;
+ 
+ 	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
+@@ -540,9 +552,11 @@
+ 				|| (mpf->mpf_specification == 4)) ) {
+ 
+ 			smp_found_config = 1;
++#ifndef CONFIG_XEN
+ 			reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE);
+ 			if (mpf->mpf_physptr)
+ 				reserve_bootmem_generic(mpf->mpf_physptr, PAGE_SIZE);
++#endif
+ 			mpf_found = mpf;
+ 			return 1;
+ 		}
+@@ -598,10 +612,12 @@
+ 
+ void __init mp_register_lapic_address(u64 address)
+ {
++#ifndef CONFIG_XEN
+ 	mp_lapic_addr = (unsigned long) address;
+ 	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
+ 	if (boot_cpu_id == -1U)
+ 		boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
++#endif
+ }
+ 
+ void __cpuinit mp_register_lapic (u8 id, u8 enabled)
+@@ -612,6 +628,7 @@
+ 	if (id == boot_cpu_id)
+ 		boot_cpu = 1;
+ 
++#ifndef CONFIG_XEN
+ 	processor.mpc_type = MP_PROCESSOR;
+ 	processor.mpc_apicid = id;
+ 	processor.mpc_apicver = 0;
+@@ -621,6 +638,7 @@
+ 	processor.mpc_featureflag = 0;
+ 	processor.mpc_reserved[0] = 0;
+ 	processor.mpc_reserved[1] = 0;
++#endif
+ 
+ 	MP_processor_info(&processor);
+ }
+@@ -663,7 +681,9 @@
+ 	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
+ 	mp_ioapics[idx].mpc_apicaddr = address;
+ 
++#ifndef CONFIG_XEN
+ 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++#endif
+ 	mp_ioapics[idx].mpc_apicid = id;
+ 	mp_ioapics[idx].mpc_apicver = 0;
+ 	
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
+--- a/arch/x86_64/kernel/pci-swiotlb.c	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/kernel/pci-swiotlb.c	2007-03-14 10:55:21.000000000 +0100
+@@ -9,10 +9,13 @@
+ #include <asm/swiotlb.h>
+ #include <asm/dma.h>
+ 
++#ifndef CONFIG_XEN
+ int swiotlb __read_mostly;
+ EXPORT_SYMBOL(swiotlb);
++#endif
+ 
+ struct dma_mapping_ops swiotlb_dma_ops = {
++#ifndef CONFIG_XEN
+ 	.mapping_error = swiotlb_dma_mapping_error,
+ 	.alloc_coherent = swiotlb_alloc_coherent,
+ 	.free_coherent = swiotlb_free_coherent,
+@@ -27,10 +30,18 @@
+ 	.map_sg = swiotlb_map_sg,
+ 	.unmap_sg = swiotlb_unmap_sg,
+ 	.dma_supported = NULL,
++#endif
+ };
+ 
+ void pci_swiotlb_init(void)
+ {
++#ifdef CONFIG_XEN
++	swiotlb_init();
++	if (swiotlb) {
++		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
++		dma_ops = &swiotlb_dma_ops;
++	}
++#else
+ 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
+ 	if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
+ 	       swiotlb = 1;
+@@ -41,4 +52,5 @@
+ 		swiotlb_init();
+ 		dma_ops = &swiotlb_dma_ops;
+ 	}
++#endif /* CONFIG_XEN */
+ }
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/process-xen.c b/arch/x86_64/kernel/process-xen.c
+--- a/arch/x86_64/kernel/process-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/process-xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,865 @@
++/*
++ *  linux/arch/x86-64/kernel/process.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  Pentium III FXSR, SSE support
++ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ * 
++ *  X86-64 port
++ *	Andi Kleen.
++ *
++ *	CPU hotplug support - ashok.raj at intel.com
++ * 
++ *  Jun Nakajima <jun.nakajima at intel.com> 
++ *     Modified for Xen
++ */
++
++/*
++ * This file handles the architecture-dependent parts of process handling..
++ */
++
++#include <stdarg.h>
++
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/slab.h>
++#include <linux/user.h>
++#include <linux/module.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/random.h>
++#include <linux/notifier.h>
++#include <linux/kprobes.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/mmu_context.h>
++#include <asm/pda.h>
++#include <asm/prctl.h>
++#include <asm/kdebug.h>
++#include <xen/interface/dom0_ops.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/hardirq.h>
++#include <asm/ia32.h>
++#include <asm/idle.h>
 +
-+n-obj-xen := i8259.o reboot.o i8237.o smpboot.o trampoline.o
++#include <xen/cpu_hotplug.h>
 +
-+include $(srctree)/scripts/Makefile.xen
++asmlinkage extern void ret_from_fork(void);
++
++unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
++
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
 +
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y))
-+extra-y := $(call cherrypickxen, $(extra-y))
-+endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/mpparse-xen.c linux-2.6.18-xen/arch/x86_64/kernel/mpparse-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/mpparse-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/mpparse-xen.c	2006-11-19 14:26:33.000000000 +0100
-@@ -0,0 +1,1011 @@
 +/*
-+ *	Intel Multiprocessor Specification 1.1 and 1.4
-+ *	compliant MP-table parsing routines.
-+ *
-+ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
-+ *	(c) 1998, 1999, 2000 Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Fixes
-+ *		Erich Boleyn	:	MP v1.4 and additional changes.
-+ *		Alan Cox	:	Added EBDA scanning
-+ *		Ingo Molnar	:	various cleanups and rewrites
-+ *		Maciej W. Rozycki:	Bits for default MP configurations
-+ *		Paul Diefenbaugh:	Added full ACPI support
++ * Powermanagement idle function, if any..
 + */
++void (*pm_idle)(void);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 +
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/acpi.h>
-+#include <linux/module.h>
++static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 +
-+#include <asm/smp.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/pgalloc.h>
-+#include <asm/io_apic.h>
-+#include <asm/proto.h>
-+#include <asm/acpi.h>
++void idle_notifier_register(struct notifier_block *n)
++{
++	atomic_notifier_chain_register(&idle_notifier, n);
++}
++EXPORT_SYMBOL_GPL(idle_notifier_register);
 +
-+/* Have we found an MP table */
-+int smp_found_config;
-+unsigned int __initdata maxcpus = NR_CPUS;
++void idle_notifier_unregister(struct notifier_block *n)
++{
++	atomic_notifier_chain_unregister(&idle_notifier, n);
++}
++EXPORT_SYMBOL(idle_notifier_unregister);
 +
-+int acpi_found_madt;
++void enter_idle(void)
++{
++	write_pda(isidle, 1);
++	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
++}
 +
-+/*
-+ * Various Linux-internal data structures created from the
-+ * MP-table.
-+ */
-+unsigned char apic_version [MAX_APICS];
-+unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++static void __exit_idle(void)
++{
++	if (test_and_clear_bit_pda(0, isidle) == 0)
++		return;
++	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
++}
 +
-+static int mp_current_pci_id = 0;
-+/* I/O APIC entries */
-+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
++/* Called from interrupts to signify idle end */
++void exit_idle(void)
++{
++	/* idle loop has pid 0 */
++	if (current->pid)
++		return;
++	__exit_idle();
++}
 +
-+/* # of MP IRQ source entries */
-+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
++/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
++void xen_idle(void)
++{
++	current_thread_info()->status &= ~TS_POLLING;
++	/*
++	 * TS_POLLING-cleared state must be visible before we
++	 * test NEED_RESCHED:
++	 */
++	smp_mb();
++	local_irq_disable();
++	if (!need_resched()) {
++		/* Enables interrupts one instruction before HLT.
++		   x86 special cases this so there is no race. */
++		safe_halt();
++	} else
++		local_irq_enable();
++	current_thread_info()->status |= TS_POLLING;
++}
 +
-+/* MP IRQ source entries */
-+int mp_irq_entries;
++#ifdef CONFIG_HOTPLUG_CPU
++static inline void play_dead(void)
++{
++	idle_task_exit();
++	local_irq_disable();
++	cpu_clear(smp_processor_id(), cpu_initialized);
++	preempt_enable_no_resched();
++	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
++	cpu_bringup();
++}
++#else
++static inline void play_dead(void)
++{
++	BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
 +
-+int nr_ioapics;
-+int pic_mode;
-+unsigned long mp_lapic_addr = 0;
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (ie sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle (void)
++{
++	current_thread_info()->status |= TS_POLLING;
++	/* endless idle loop with no priority at all */
++	while (1) {
++		while (!need_resched()) {
++			if (__get_cpu_var(cpu_idle_state))
++				__get_cpu_var(cpu_idle_state) = 0;
 +
++			rmb();
++			if (cpu_is_offline(smp_processor_id()))
++				play_dead();
++			/*
++			 * Idle routines should keep interrupts disabled
++			 * from here on, until they go to idle.
++			 * Otherwise, idle callbacks can misfire.
++			 */
++			local_irq_disable();
++			enter_idle();
++			xen_idle();
++			/* In many cases the interrupt that ended idle
++			   has already called exit_idle. But some idle
++			   loops can be woken up without interrupt. */
++			__exit_idle();
++		}
 +
++		preempt_enable_no_resched();
++		schedule();
++		preempt_disable();
++	}
++}
 +
-+/* Processor that is doing the boot up */
-+unsigned int boot_cpu_id = -1U;
-+/* Internal processor count */
-+unsigned int num_processors __initdata = 0;
++void cpu_idle_wait(void)
++{
++	unsigned int cpu, this_cpu = get_cpu();
++	cpumask_t map, tmp = current->cpus_allowed;
 +
-+unsigned disabled_cpus __initdata;
++	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++	put_cpu();
 +
-+/* Bitmask of physically existing CPUs */
-+physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
++ 	cpus_clear(map);
++	for_each_online_cpu(cpu) {
++		per_cpu(cpu_idle_state, cpu) = 1;
++		cpu_set(cpu, map);
++	}
 +
-+/* ACPI MADT entry parsing functions */
-+#ifdef CONFIG_ACPI
-+extern struct acpi_boot_flags acpi_boot;
-+#ifdef CONFIG_X86_LOCAL_APIC
-+extern int acpi_parse_lapic (acpi_table_entry_header *header);
-+extern int acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header);
-+extern int acpi_parse_lapic_nmi (acpi_table_entry_header *header);
-+#endif /*CONFIG_X86_LOCAL_APIC*/
-+#ifdef CONFIG_X86_IO_APIC
-+extern int acpi_parse_ioapic (acpi_table_entry_header *header);
-+#endif /*CONFIG_X86_IO_APIC*/
-+#endif /*CONFIG_ACPI*/
++	__get_cpu_var(cpu_idle_state) = 0;
 +
-+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++	wmb();
++	do {
++		ssleep(1);
++		for_each_online_cpu(cpu) {
++			if (cpu_isset(cpu, map) &&
++					!per_cpu(cpu_idle_state, cpu))
++				cpu_clear(cpu, map);
++		}
++		cpus_and(map, map, cpu_online_map);
++	} while (!cpus_empty(map));
 +
++	set_cpus_allowed(current, tmp);
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
 +
-+/*
-+ * Intel MP BIOS table parsing routines:
-+ */
++/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
++/* Always use xen_idle() instead. */
++void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) {}
 +
-+/*
-+ * Checksum an MP configuration block.
-+ */
++void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) {}
 +
-+static int __init mpf_checksum(unsigned char *mp, int len)
++/* Prints also some state that isn't saved in the pt_regs */ 
++void __show_regs(struct pt_regs * regs)
 +{
-+	int sum = 0;
++	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
++	unsigned int fsindex,gsindex;
++	unsigned int ds,cs,es; 
 +
-+	while (len--)
-+		sum += *mp++;
++	printk("\n");
++	print_modules();
++	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
++		current->pid, current->comm, print_tainted(),
++		init_utsname()->release,
++		(int)strcspn(init_utsname()->version, " "),
++		init_utsname()->version);
++	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
++	printk_address(regs->rip); 
++	printk("RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->rsp,
++		regs->eflags);
++	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
++	       regs->rax, regs->rbx, regs->rcx);
++	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
++	       regs->rdx, regs->rsi, regs->rdi); 
++	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
++	       regs->rbp, regs->r8, regs->r9); 
++	printk("R10: %016lx R11: %016lx R12: %016lx\n",
++	       regs->r10, regs->r11, regs->r12); 
++	printk("R13: %016lx R14: %016lx R15: %016lx\n",
++	       regs->r13, regs->r14, regs->r15); 
 +
-+	return sum & 0xFF;
-+}
++	asm("movl %%ds,%0" : "=r" (ds)); 
++	asm("movl %%cs,%0" : "=r" (cs)); 
++	asm("movl %%es,%0" : "=r" (es)); 
++	asm("movl %%fs,%0" : "=r" (fsindex));
++	asm("movl %%gs,%0" : "=r" (gsindex));
 +
-+#ifndef CONFIG_XEN
-+static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
-+{
-+	int cpu;
-+	unsigned char ver;
-+	cpumask_t tmp_map;
++	rdmsrl(MSR_FS_BASE, fs);
++	rdmsrl(MSR_GS_BASE, gs); 
++	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 
 +
-+	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
-+		disabled_cpus++;
-+		return;
-+	}
++	cr0 = read_cr0();
++	cr2 = 0; /* No real clue how to read it. JQ */
++	cr3 = read_cr3();
++	cr4 = read_cr4();
 +
-+	printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
-+		m->mpc_apicid,
-+	       (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
-+	       (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
-+		m->mpc_apicver);
++	printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 
++	       fs,fsindex,gs,gsindex,shadowgs); 
++	printk("CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); 
++	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
++}
 +
-+	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+		Dprintk("    Bootup CPU\n");
-+		boot_cpu_id = m->mpc_apicid;
-+	}
-+	if (num_processors >= NR_CPUS) {
-+		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-+			" Processor ignored.\n", NR_CPUS);
-+		return;
-+	}
++void show_regs(struct pt_regs *regs)
++{
++	printk("CPU %d:", smp_processor_id());
++	__show_regs(regs);
++	show_trace(NULL, regs, &regs->rsp);
++}
 +
-+	num_processors++;
-+	cpus_complement(tmp_map, cpu_present_map);
-+	cpu = first_cpu(tmp_map);
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++	struct task_struct *me = current;
++	struct thread_struct *t = &me->thread;
 +
-+#if MAX_APICS < 255	
-+	if ((int)m->mpc_apicid > MAX_APICS) {
-+		printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
-+			m->mpc_apicid, MAX_APICS);
-+		return;
-+	}
++	if (me->thread.io_bitmap_ptr) { 
++#ifndef CONFIG_X86_NO_TSS
++		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++#endif
++#ifdef CONFIG_XEN
++		struct physdev_set_iobitmap iobmp_op = { 0 };
 +#endif
-+	ver = m->mpc_apicver;
 +
-+	physid_set(m->mpc_apicid, phys_cpu_present_map);
-+	/*
-+	 * Validate version
-+	 */
-+	if (ver == 0x0) {
-+		printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
-+		ver = 0x10;
++		kfree(t->io_bitmap_ptr);
++		t->io_bitmap_ptr = NULL;
++		clear_thread_flag(TIF_IO_BITMAP);
++		/*
++		 * Careful, clear this in the TSS too:
++		 */
++#ifndef CONFIG_X86_NO_TSS
++		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
++		put_cpu();
++#endif
++#ifdef CONFIG_XEN
++		HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobmp_op);
++#endif
++		t->io_bitmap_max = 0;
 +	}
-+	apic_version[m->mpc_apicid] = ver;
-+ 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+ 		/*
-+ 		 * bios_cpu_apicid is required to have processors listed
-+ 		 * in same order as logical cpu numbers. Hence the first
-+ 		 * entry is BSP, and so on.
-+ 		 */
-+		cpu = 0;
-+ 	}
-+	bios_cpu_apicid[cpu] = m->mpc_apicid;
-+	x86_cpu_to_apicid[cpu] = m->mpc_apicid;
-+
-+	cpu_set(cpu, cpu_possible_map);
-+	cpu_set(cpu, cpu_present_map);
 +}
-+#else
-+void __init MP_processor_info (struct mpc_config_processor *m)
++
++void load_gs_index(unsigned gs)
 +{
-+	num_processors++;
++	HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs);
 +}
-+#endif /* CONFIG_XEN */
 +
-+static void __init MP_bus_info (struct mpc_config_bus *m)
++void flush_thread(void)
 +{
-+	char str[7];
++	struct task_struct *tsk = current;
++	struct thread_info *t = current_thread_info();
 +
-+	memcpy(str, m->mpc_bustype, 6);
-+	str[6] = 0;
-+	Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
-+
-+	if (strncmp(str, "ISA", 3) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
-+	} else if (strncmp(str, "EISA", 4) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
-+	} else if (strncmp(str, "PCI", 3) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
-+		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
-+		mp_current_pci_id++;
-+	} else if (strncmp(str, "MCA", 3) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
-+	} else {
-+		printk(KERN_ERR "Unknown bustype %s\n", str);
++	if (t->flags & _TIF_ABI_PENDING) {
++		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
++		if (t->flags & _TIF_IA32)
++			current_thread_info()->status |= TS_COMPAT;
 +	}
++	t->flags &= ~_TIF_DEBUG;
++
++	tsk->thread.debugreg0 = 0;
++	tsk->thread.debugreg1 = 0;
++	tsk->thread.debugreg2 = 0;
++	tsk->thread.debugreg3 = 0;
++	tsk->thread.debugreg6 = 0;
++	tsk->thread.debugreg7 = 0;
++	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
++	/*
++	 * Forget coprocessor state..
++	 */
++	clear_fpu(tsk);
++	clear_used_math();
 +}
 +
-+static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++void release_thread(struct task_struct *dead_task)
 +{
-+	if (!(m->mpc_flags & MPC_APIC_USABLE))
-+		return;
-+
-+	printk("I/O APIC #%d Version %d at 0x%X.\n",
-+		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
-+	if (nr_ioapics >= MAX_IO_APICS) {
-+		printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
-+			MAX_IO_APICS, nr_ioapics);
-+		panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
-+	}
-+	if (!m->mpc_apicaddr) {
-+		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
-+			" found in MP table, skipping!\n");
-+		return;
++	if (dead_task->mm) {
++		if (dead_task->mm->context.size) {
++			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
++					dead_task->comm,
++					dead_task->mm->context.ldt,
++					dead_task->mm->context.size);
++			BUG();
++		}
 +	}
-+	mp_ioapics[nr_ioapics] = *m;
-+	nr_ioapics++;
 +}
 +
-+static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
++static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
 +{
-+	mp_irqs [mp_irq_entries] = *m;
-+	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
-+		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
-+			m->mpc_irqtype, m->mpc_irqflag & 3,
-+			(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
-+			m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
-+	if (++mp_irq_entries >= MAX_IRQ_SOURCES)
-+		panic("Max # of irq sources exceeded!!\n");
++	struct user_desc ud = { 
++		.base_addr = addr,
++		.limit = 0xfffff,
++		.seg_32bit = 1,
++		.limit_in_pages = 1,
++		.useable = 1,
++	};
++	struct n_desc_struct *desc = (void *)t->thread.tls_array;
++	desc += tls;
++	desc->a = LDT_entry_a(&ud); 
++	desc->b = LDT_entry_b(&ud); 
 +}
 +
-+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
++static inline u32 read_32bit_tls(struct task_struct *t, int tls)
 +{
-+	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
-+		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
-+			m->mpc_irqtype, m->mpc_irqflag & 3,
-+			(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
-+			m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
-+	/*
-+	 * Well it seems all SMP boards in existence
-+	 * use ExtINT/LVT1 == LINT0 and
-+	 * NMI/LVT2 == LINT1 - the following check
-+	 * will show us if this assumptions is false.
-+	 * Until then we do not have to add baggage.
-+	 */
-+	if ((m->mpc_irqtype == mp_ExtINT) &&
-+		(m->mpc_destapiclint != 0))
-+			BUG();
-+	if ((m->mpc_irqtype == mp_NMI) &&
-+		(m->mpc_destapiclint != 1))
-+			BUG();
++	struct desc_struct *desc = (void *)t->thread.tls_array;
++	desc += tls;
++	return desc->base0 | 
++		(((u32)desc->base1) << 16) | 
++		(((u32)desc->base2) << 24);
 +}
 +
 +/*
-+ * Read/parse the MPC
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
 + */
++void prepare_to_copy(struct task_struct *tsk)
++{
++	unlazy_fpu(tsk);
++}
 +
-+static int __init smp_read_mpc(struct mp_config_table *mpc)
++int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, 
++		unsigned long unused,
++	struct task_struct * p, struct pt_regs * regs)
 +{
-+	char str[16];
-+	int count=sizeof(*mpc);
-+	unsigned char *mpt=((unsigned char *)mpc)+count;
++	int err;
++	struct pt_regs * childregs;
++	struct task_struct *me = current;
 +
-+	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
-+		printk("SMP mptable: bad signature [%c%c%c%c]!\n",
-+			mpc->mpc_signature[0],
-+			mpc->mpc_signature[1],
-+			mpc->mpc_signature[2],
-+			mpc->mpc_signature[3]);
-+		return 0;
-+	}
-+	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
-+		printk("SMP mptable: checksum error!\n");
-+		return 0;
-+	}
-+	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
-+		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
-+			mpc->mpc_spec);
-+		return 0;
-+	}
-+	if (!mpc->mpc_lapic) {
-+		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
-+		return 0;
-+	}
-+	memcpy(str,mpc->mpc_oem,8);
-+	str[8]=0;
-+	printk(KERN_INFO "OEM ID: %s ",str);
++	childregs = ((struct pt_regs *)
++			(THREAD_SIZE + task_stack_page(p))) - 1;
++	*childregs = *regs;
++
++	childregs->rax = 0;
++	childregs->rsp = rsp;
++	if (rsp == ~0UL)
++		childregs->rsp = (unsigned long)childregs;
++
++	p->thread.rsp = (unsigned long) childregs;
++	p->thread.rsp0 = (unsigned long) (childregs+1);
++	p->thread.userrsp = me->thread.userrsp; 
++
++	set_tsk_thread_flag(p, TIF_FORK);
 +
-+	memcpy(str,mpc->mpc_productid,12);
-+	str[12]=0;
-+	printk("Product ID: %s ",str);
++	p->thread.fs = me->thread.fs;
++	p->thread.gs = me->thread.gs;
 +
-+	printk("APIC at: 0x%X\n",mpc->mpc_lapic);
++	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
++	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
++	asm("mov %%es,%0" : "=m" (p->thread.es));
++	asm("mov %%ds,%0" : "=m" (p->thread.ds));
 +
-+	/* save the local APIC address, it might be non-default */
-+	if (!acpi_lapic)
-+	mp_lapic_addr = mpc->mpc_lapic;
++	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
++		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++		if (!p->thread.io_bitmap_ptr) {
++			p->thread.io_bitmap_max = 0;
++			return -ENOMEM;
++		}
++		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
++				IO_BITMAP_BYTES);
++		set_tsk_thread_flag(p, TIF_IO_BITMAP);
++	} 
 +
 +	/*
-+	 *	Now process the configuration blocks.
++	 * Set a new TLS for the child thread?
 +	 */
-+	while (count < mpc->mpc_length) {
-+		switch(*mpt) {
-+			case MP_PROCESSOR:
-+			{
-+				struct mpc_config_processor *m=
-+					(struct mpc_config_processor *)mpt;
-+				if (!acpi_lapic)
-+				MP_processor_info(m);
-+				mpt += sizeof(*m);
-+				count += sizeof(*m);
-+				break;
-+			}
-+			case MP_BUS:
-+			{
-+				struct mpc_config_bus *m=
-+					(struct mpc_config_bus *)mpt;
-+				MP_bus_info(m);
-+				mpt += sizeof(*m);
-+				count += sizeof(*m);
-+				break;
-+			}
-+			case MP_IOAPIC:
-+			{
-+				struct mpc_config_ioapic *m=
-+					(struct mpc_config_ioapic *)mpt;
-+				MP_ioapic_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+			case MP_INTSRC:
-+			{
-+				struct mpc_config_intsrc *m=
-+					(struct mpc_config_intsrc *)mpt;
++	if (clone_flags & CLONE_SETTLS) {
++#ifdef CONFIG_IA32_EMULATION
++		if (test_thread_flag(TIF_IA32))
++			err = ia32_child_tls(p, childregs); 
++		else 			
++#endif	 
++			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 
++		if (err) 
++			goto out;
++	}
++        p->thread.iopl = current->thread.iopl;
 +
-+				MP_intsrc_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+			case MP_LINTSRC:
-+			{
-+				struct mpc_config_lintsrc *m=
-+					(struct mpc_config_lintsrc *)mpt;
-+				MP_lintsrc_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+		}
++	err = 0;
++out:
++	if (err && p->thread.io_bitmap_ptr) {
++		kfree(p->thread.io_bitmap_ptr);
++		p->thread.io_bitmap_max = 0;
 +	}
-+	clustered_apic_check();
-+	if (!num_processors)
-+		printk(KERN_ERR "SMP mptable: no processors registered!\n");
-+	return num_processors;
++	return err;
 +}
 +
-+static int __init ELCR_trigger(unsigned int irq)
++static inline void __save_init_fpu( struct task_struct *tsk )
 +{
-+	unsigned int port;
++	asm volatile( "rex64 ; fxsave %0 ; fnclex"
++		      : "=m" (tsk->thread.i387.fxsave));
++	tsk->thread_info->status &= ~TS_USEDFPU;
++}
 +
-+	port = 0x4d0 + (irq >> 3);
-+	return (inb(port) >> (irq & 7)) & 1;
++/*
++ * This special macro can be used to load a debugging register
++ */
++#define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r)
++
++static inline void __switch_to_xtra(struct task_struct *prev_p,
++			     	    struct task_struct *next_p)
++{
++	struct thread_struct *prev, *next;
++
++	prev = &prev_p->thread,
++	next = &next_p->thread;
++
++	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
++		loaddebug(next, 0);
++		loaddebug(next, 1);
++		loaddebug(next, 2);
++		loaddebug(next, 3);
++		/* no 4 and 5 */
++		loaddebug(next, 6);
++		loaddebug(next, 7);
++	}
++#ifndef CONFIG_XEN
++	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
++		/*
++		 * Copy the relevant range of the IO bitmap.
++		 * Normally this is 128 bytes or less:
++		 */
++		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
++		       max(prev->io_bitmap_max, next->io_bitmap_max));
++	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
++		/*
++		 * Clear any possible leftover bits:
++		 */
++		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
++	}
++#endif
 +}
 +
-+static void __init construct_default_ioirq_mptable(int mpc_default_type)
++/*
++ *	switch_to(x,y) should switch tasks from x to y.
++ *
++ * This could still be optimized: 
++ * - fold all the options into a flag word and test it with a single test.
++ * - could test fs/gs bitsliced
++ *
++ * Kprobes not supported here. Set the probe on schedule instead.
++ */
++__kprobes struct task_struct *
++__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 +{
-+	struct mpc_config_intsrc intsrc;
-+	int i;
-+	int ELCR_fallback = 0;
++	struct thread_struct *prev = &prev_p->thread,
++				 *next = &next_p->thread;
++	int cpu = smp_processor_id();  
++#ifndef CONFIG_X86_NO_TSS
++	struct tss_struct *tss = &per_cpu(init_tss, cpu);
++#endif
++	struct physdev_set_iopl iopl_op;
++	struct physdev_set_iobitmap iobmp_op;
++	multicall_entry_t _mcl[8], *mcl = _mcl;
 +
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqflag = 0;			/* conforming */
-+	intsrc.mpc_srcbus = 0;
-+	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
++	/*
++	 * Reload esp0, LDT and the page table pointer:
++	 */
++	mcl->op      = __HYPERVISOR_stack_switch;
++	mcl->args[0] = __KERNEL_DS;
++	mcl->args[1] = next->rsp0;
++	mcl++;
 +
-+	intsrc.mpc_irqtype = mp_INT;
++	/* we're going to use this soon, after a few expensive things */
++	if (next_p->fpu_counter>5)
++		prefetch(&next->i387.fxsave);
 +
 +	/*
-+	 *  If true, we have an ISA/PCI system with no IRQ entries
-+	 *  in the MP table. To prevent the PCI interrupts from being set up
-+	 *  incorrectly, we try to use the ELCR. The sanity check to see if
-+	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
-+	 *  never be level sensitive, so we simply see if the ELCR agrees.
-+	 *  If it does, we assume it's valid.
++	 * Load the per-thread Thread-Local Storage descriptor.
++	 * This is load_TLS(next, cpu) with multicalls.
 +	 */
-+	if (mpc_default_type == 5) {
-+		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
++#define C(i) do {							\
++	if (unlikely(next->tls_array[i] != prev->tls_array[i])) {	\
++		mcl->op      = __HYPERVISOR_update_descriptor;		\
++		mcl->args[0] = virt_to_machine(				\
++			&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]);		\
++		mcl->args[1] = next->tls_array[i];			\
++		mcl++;							\
++	}								\
++} while (0)
++	C(0); C(1); C(2);
++#undef C
 +
-+		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
-+			printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
-+		else {
-+			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
-+			ELCR_fallback = 1;
-+		}
++	if (unlikely(prev->iopl != next->iopl)) {
++		iopl_op.iopl = (next->iopl == 0) ? 1 : next->iopl;
++		mcl->op      = __HYPERVISOR_physdev_op;
++		mcl->args[0] = PHYSDEVOP_set_iopl;
++		mcl->args[1] = (unsigned long)&iopl_op;
++		mcl++;
 +	}
 +
-+	for (i = 0; i < 16; i++) {
-+		switch (mpc_default_type) {
-+		case 2:
-+			if (i == 0 || i == 13)
-+				continue;	/* IRQ0 & IRQ13 not connected */
-+			/* fall through */
-+		default:
-+			if (i == 2)
-+				continue;	/* IRQ2 is never connected */
-+		}
++	if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP) ||
++	    test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
++		iobmp_op.bitmap   = (char *)next->io_bitmap_ptr;
++		iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++		mcl->op      = __HYPERVISOR_physdev_op;
++		mcl->args[0] = PHYSDEVOP_set_iobitmap;
++		mcl->args[1] = (unsigned long)&iobmp_op;
++		mcl++;
++	}
 +
-+		if (ELCR_fallback) {
-+			/*
-+			 *  If the ELCR indicates a level-sensitive interrupt, we
-+			 *  copy that information over to the MP table in the
-+			 *  irqflag field (level sensitive, active high polarity).
-+			 */
-+			if (ELCR_trigger(i))
-+				intsrc.mpc_irqflag = 13;
-+			else
-+				intsrc.mpc_irqflag = 0;
-+		}
++	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
++	/* 
++	 * Switch DS and ES.
++	 * This won't pick up thread selector changes, but I guess that is ok.
++	 */
++	if (unlikely(next->es))
++		loadsegment(es, next->es); 
++	
++	if (unlikely(next->ds))
++		loadsegment(ds, next->ds);
 +
-+		intsrc.mpc_srcbusirq = i;
-+		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
-+		MP_intsrc_info(&intsrc);
-+	}
++	/* 
++	 * Switch FS and GS.
++	 */
++	if (unlikely(next->fsindex))
++		loadsegment(fs, next->fsindex);
 +
-+	intsrc.mpc_irqtype = mp_ExtINT;
-+	intsrc.mpc_srcbusirq = 0;
-+	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
-+	MP_intsrc_info(&intsrc);
-+}
++	if (next->fs)
++		HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs); 
++	
++	if (unlikely(next->gsindex))
++		load_gs_index(next->gsindex);
 +
-+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
-+{
-+	struct mpc_config_processor processor;
-+	struct mpc_config_bus bus;
-+	struct mpc_config_ioapic ioapic;
-+	struct mpc_config_lintsrc lintsrc;
-+	int linttypes[2] = { mp_ExtINT, mp_NMI };
-+	int i;
++	if (next->gs)
++		HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs); 
++
++	/* Must be after DS reload */
++	/*
++	 * This is basically '__unlazy_fpu'
++	 */
++	if (prev_p->thread_info->status & TS_USEDFPU) {
++		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++		HYPERVISOR_fpu_taskswitch(1);
++	}
++
++	/* 
++	 * Switch the PDA and FPU contexts.
++	 */
++	prev->userrsp = read_pda(oldrsp); 
++	write_pda(oldrsp, next->userrsp); 
++	write_pda(pcurrent, next_p); 
 +
++	write_pda(kernelstack,
++	(unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
++#ifdef CONFIG_CC_STACKPROTECTOR
++	write_pda(stack_canary, next_p->stack_canary);
 +	/*
-+	 * local APIC has default address
++	 * Build time only check to make sure the stack_canary is at
++	 * offset 40 in the pda; this is a gcc ABI requirement
 +	 */
-+	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
++#endif
 +
 +	/*
-+	 * 2 CPUs, numbered 0 & 1.
++	 * Now maybe reload the debug registers and handle I/O bitmaps
 +	 */
-+	processor.mpc_type = MP_PROCESSOR;
-+	/* Either an integrated APIC or a discrete 82489DX. */
-+	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+	processor.mpc_cpuflag = CPU_ENABLED;
-+	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
-+				   (boot_cpu_data.x86_model << 4) |
-+				   boot_cpu_data.x86_mask;
-+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+	processor.mpc_reserved[0] = 0;
-+	processor.mpc_reserved[1] = 0;
-+	for (i = 0; i < 2; i++) {
-+		processor.mpc_apicid = i;
-+		MP_processor_info(&processor);
-+	}
-+
-+	bus.mpc_type = MP_BUS;
-+	bus.mpc_busid = 0;
-+	switch (mpc_default_type) {
-+		default:
-+			printk(KERN_ERR "???\nUnknown standard configuration %d\n",
-+				mpc_default_type);
-+			/* fall through */
-+		case 1:
-+		case 5:
-+			memcpy(bus.mpc_bustype, "ISA   ", 6);
-+			break;
-+		case 2:
-+		case 6:
-+		case 3:
-+			memcpy(bus.mpc_bustype, "EISA  ", 6);
-+			break;
-+		case 4:
-+		case 7:
-+			memcpy(bus.mpc_bustype, "MCA   ", 6);
-+	}
-+	MP_bus_info(&bus);
-+	if (mpc_default_type > 4) {
-+		bus.mpc_busid = 1;
-+		memcpy(bus.mpc_bustype, "PCI   ", 6);
-+		MP_bus_info(&bus);
-+	}
-+
-+	ioapic.mpc_type = MP_IOAPIC;
-+	ioapic.mpc_apicid = 2;
-+	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+	ioapic.mpc_flags = MPC_APIC_USABLE;
-+	ioapic.mpc_apicaddr = 0xFEC00000;
-+	MP_ioapic_info(&ioapic);
++	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
++	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
++		__switch_to_xtra(prev_p, next_p);
 +
-+	/*
-+	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
++	/* If the task has used fpu the last 5 timeslices, just do a full
++	 * restore of the math state immediately to avoid the trap; the
++	 * chances of needing FPU soon are obviously high now
 +	 */
-+	construct_default_ioirq_mptable(mpc_default_type);
-+
-+	lintsrc.mpc_type = MP_LINTSRC;
-+	lintsrc.mpc_irqflag = 0;		/* conforming */
-+	lintsrc.mpc_srcbusid = 0;
-+	lintsrc.mpc_srcbusirq = 0;
-+	lintsrc.mpc_destapic = MP_APIC_ALL;
-+	for (i = 0; i < 2; i++) {
-+		lintsrc.mpc_irqtype = linttypes[i];
-+		lintsrc.mpc_destapiclint = i;
-+		MP_lintsrc_info(&lintsrc);
-+	}
++	if (next_p->fpu_counter>5)
++		math_state_restore();
++	return prev_p;
 +}
 +
-+static struct intel_mp_floating *mpf_found;
-+
 +/*
-+ * Scan the memory blocks for an SMP configuration block.
++ * sys_execve() executes a new program.
 + */
-+void __init get_smp_config (void)
++asmlinkage 
++long sys_execve(char __user *name, char __user * __user *argv,
++		char __user * __user *envp, struct pt_regs regs)
 +{
-+	struct intel_mp_floating *mpf = mpf_found;
-+
-+	/*
-+ 	 * ACPI supports both logical (e.g. Hyper-Threading) and physical 
-+ 	 * processors, where MPS only supports physical.
-+ 	 */
-+ 	if (acpi_lapic && acpi_ioapic) {
-+ 		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
-+ 		return;
-+	}
-+ 	else if (acpi_lapic)
-+ 		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
++	long error;
++	char * filename;
 +
-+	printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
-+	if (mpf->mpf_feature2 & (1<<7)) {
-+		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
-+		pic_mode = 1;
-+	} else {
-+		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
-+		pic_mode = 0;
++	filename = getname(name);
++	error = PTR_ERR(filename);
++	if (IS_ERR(filename)) 
++		return error;
++	error = do_execve(filename, argv, envp, &regs); 
++	if (error == 0) {
++		task_lock(current);
++		current->ptrace &= ~PT_DTRACE;
++		task_unlock(current);
 +	}
++	putname(filename);
++	return error;
++}
 +
-+	/*
-+	 * Now see if we need to read further.
-+	 */
-+	if (mpf->mpf_feature1 != 0) {
-+
-+		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
-+		construct_default_ISA_mptable(mpf->mpf_feature1);
-+
-+	} else if (mpf->mpf_physptr) {
-+
-+		/*
-+		 * Read the physical hardware table.  Anything here will
-+		 * override the defaults.
-+		 */
-+ 		if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
-+			smp_found_config = 0;
-+			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
-+			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
-+			return;
-+		}
-+		/*
-+		 * If there are no explicit MP IRQ entries, then we are
-+		 * broken.  We set up most of the low 16 IO-APIC pins to
-+		 * ISA defaults and hope it will work.
-+		 */
-+		if (!mp_irq_entries) {
-+			struct mpc_config_bus bus;
++void set_personality_64bit(void)
++{
++	/* inherit personality from parent */
 +
-+			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++	/* Make sure to be in 64bit mode */
++	clear_thread_flag(TIF_IA32); 
 +
-+			bus.mpc_type = MP_BUS;
-+			bus.mpc_busid = 0;
-+			memcpy(bus.mpc_bustype, "ISA   ", 6);
-+			MP_bus_info(&bus);
++	/* TBD: overwrites user setup. Should have two bits.
++	   But 64bit processes have always behaved this way,
++	   so it's not too bad. The main problem is just that
++   	   32bit childs are affected again. */
++	current->personality &= ~READ_IMPLIES_EXEC;
++}
 +
-+			construct_default_ioirq_mptable(0);
-+		}
++asmlinkage long sys_fork(struct pt_regs *regs)
++{
++	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
++}
 +
-+	} else
-+		BUG();
++asmlinkage long
++sys_clone(unsigned long clone_flags, unsigned long newsp,
++	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
++{
++	if (!newsp)
++		newsp = regs->rsp;
++	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
++}
 +
-+	printk(KERN_INFO "Processors: %d\n", num_processors);
-+	/*
-+	 * Only use the first configuration found.
-+	 */
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage long sys_vfork(struct pt_regs *regs)
++{
++	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
++		    NULL, NULL);
 +}
 +
-+static int __init smp_scan_config (unsigned long base, unsigned long length)
++unsigned long get_wchan(struct task_struct *p)
 +{
-+	extern void __bad_mpf_size(void); 
-+	unsigned int *bp = isa_bus_to_virt(base);
-+	struct intel_mp_floating *mpf;
++	unsigned long stack;
++	u64 fp,rip;
++	int count = 0;
++
++	if (!p || p == current || p->state==TASK_RUNNING)
++		return 0; 
++	stack = (unsigned long)task_stack_page(p);
++	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
++		return 0;
++	fp = *(u64 *)(p->thread.rsp);
++	do { 
++		if (fp < (unsigned long)stack ||
++		    fp > (unsigned long)stack+THREAD_SIZE)
++			return 0; 
++		rip = *(u64 *)(fp+8); 
++		if (!in_sched_functions(rip))
++			return rip; 
++		fp = *(u64 *)fp; 
++	} while (count++ < 16); 
++	return 0;
++}
 +
-+	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
-+	if (sizeof(*mpf) != 16)
-+		__bad_mpf_size();
-+
-+	while (length > 0) {
-+		mpf = (struct intel_mp_floating *)bp;
-+		if ((*bp == SMP_MAGIC_IDENT) &&
-+			(mpf->mpf_length == 1) &&
-+			!mpf_checksum((unsigned char *)bp, 16) &&
-+			((mpf->mpf_specification == 1)
-+				|| (mpf->mpf_specification == 4)) ) {
++long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
++{ 
++	int ret = 0; 
++	int doit = task == current;
++	int cpu;
 +
-+			smp_found_config = 1;
-+			mpf_found = mpf;
-+			return 1;
++	switch (code) { 
++	case ARCH_SET_GS:
++		if (addr >= TASK_SIZE_OF(task))
++			return -EPERM; 
++		cpu = get_cpu();
++		/* handle small bases via the GDT because that's faster to 
++		   switch. */
++		if (addr <= 0xffffffff) {  
++			set_32bit_tls(task, GS_TLS, addr); 
++			if (doit) { 
++				load_TLS(&task->thread, cpu);
++				load_gs_index(GS_TLS_SEL); 
++			}
++			task->thread.gsindex = GS_TLS_SEL; 
++			task->thread.gs = 0;
++		} else { 
++			task->thread.gsindex = 0;
++			task->thread.gs = addr;
++			if (doit) {
++				load_gs_index(0);
++				ret = HYPERVISOR_set_segment_base(
++					SEGBASE_GS_USER, addr);
++			} 
++		}
++		put_cpu();
++		break;
++	case ARCH_SET_FS:
++		/* Not strictly needed for fs, but do it for symmetry
++		   with gs */
++		if (addr >= TASK_SIZE_OF(task))
++			return -EPERM; 
++		cpu = get_cpu();
++		/* handle small bases via the GDT because that's faster to 
++		   switch. */
++		if (addr <= 0xffffffff) { 
++			set_32bit_tls(task, FS_TLS, addr);
++			if (doit) { 
++				load_TLS(&task->thread, cpu); 
++				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
++			}
++			task->thread.fsindex = FS_TLS_SEL;
++			task->thread.fs = 0;
++		} else { 
++			task->thread.fsindex = 0;
++			task->thread.fs = addr;
++			if (doit) {
++				/* set the selector to 0 to not confuse
++				   __switch_to */
++				asm volatile("movl %0,%%fs" :: "r" (0));
++                                ret = HYPERVISOR_set_segment_base(SEGBASE_FS,
++								  addr);
++			}
 +		}
-+		bp += 4;
-+		length -= 16;
++		put_cpu();
++		break;
++	case ARCH_GET_FS: { 
++		unsigned long base; 
++		if (task->thread.fsindex == FS_TLS_SEL)
++			base = read_32bit_tls(task, FS_TLS);
++		else if (doit)
++			rdmsrl(MSR_FS_BASE, base);
++		else
++			base = task->thread.fs;
++		ret = put_user(base, (unsigned long __user *)addr); 
++		break; 
 +	}
-+	return 0;
-+}
++	case ARCH_GET_GS: { 
++		unsigned long base;
++		unsigned gsindex;
++		if (task->thread.gsindex == GS_TLS_SEL)
++			base = read_32bit_tls(task, GS_TLS);
++		else if (doit) {
++ 			asm("movl %%gs,%0" : "=r" (gsindex));
++			if (gsindex)
++				rdmsrl(MSR_KERNEL_GS_BASE, base);
++			else
++				base = task->thread.gs;
++		}
++		else
++			base = task->thread.gs;
++		ret = put_user(base, (unsigned long __user *)addr); 
++		break;
++	}
++
++	default:
++		ret = -EINVAL;
++		break;
++	} 
 +
-+void __init find_intel_smp (void)
++	return ret;	
++} 
++
++long sys_arch_prctl(int code, unsigned long addr)
 +{
-+	unsigned int address;
++	return do_arch_prctl(current, code, addr);
++} 
 +
-+	/*
-+	 * FIXME: Linux assumes you have 640K of base ram..
-+	 * this continues the error...
-+	 *
-+	 * 1) Scan the bottom 1K for a signature
-+	 * 2) Scan the top 1K of base RAM
-+	 * 3) Scan the 64K of bios
-+	 */
-+	if (smp_scan_config(0x0,0x400) ||
-+		smp_scan_config(639*0x400,0x400) ||
-+			smp_scan_config(0xF0000,0x10000))
-+		return;
-+	/*
-+	 * If it is an SMP machine we should know now, unless the
-+	 * configuration is in an EISA/MCA bus machine with an
-+	 * extended bios data area.
-+	 *
-+	 * there is a real-mode segmented pointer pointing to the
-+	 * 4K EBDA area at 0x40E, calculate and scan it here.
-+	 *
-+	 * NOTE! There are Linux loaders that will corrupt the EBDA
-+	 * area, and as such this kind of SMP config may be less
-+	 * trustworthy, simply because the SMP table may have been
-+	 * stomped on during early boot. These loaders are buggy and
-+	 * should be fixed.
-+	 */
++/* 
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++	struct pt_regs *pp, ptregs;
 +
-+	address = *(unsigned short *)phys_to_virt(0x40E);
-+	address <<= 4;
-+	if (smp_scan_config(address, 0x1000))
-+		return;
++	pp = task_pt_regs(tsk);
++
++	ptregs = *pp; 
++	ptregs.cs &= 0xffff;
++	ptregs.ss &= 0xffff;
 +
-+	/* If we have come this far, we did not find an MP table  */
-+	 printk(KERN_INFO "No mptable found.\n");
++	elf_core_copy_regs(regs, &ptregs);
++ 
++        boot_option_idle_override = 1;
++	return 1;
 +}
 +
-+/*
-+ * - Intel MP Configuration Table
-+ */
-+void __init find_smp_config (void)
++unsigned long arch_align_stack(unsigned long sp)
 +{
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	find_intel_smp();
-+#endif
++	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
++		sp -= get_random_int() % 8192;
++	return sp & ~0xf;
 +}
 +
++#ifndef CONFIG_SMP
++void _restore_vcpu(void)
++{
++}
++#endif
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/setup64-xen.c b/arch/x86_64/kernel/setup64-xen.c
+--- a/arch/x86_64/kernel/setup64-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/setup64-xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,356 @@
++/* 
++ * X86-64 specific CPU setup.
++ * Copyright (C) 1995  Linus Torvalds
++ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
++ * See setup.c for older changelog.
++ *
++ * Jun Nakajima <jun.nakajima at intel.com> 
++ *   Modified for Xen
++ *
++ */ 
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/bootmem.h>
++#include <linux/bitops.h>
++#include <linux/module.h>
++#include <asm/bootsetup.h>
++#include <asm/pda.h>
++#include <asm/pgtable.h>
++#include <asm/processor.h>
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/mmu_context.h>
++#include <asm/smp.h>
++#include <asm/i387.h>
++#include <asm/percpu.h>
++#include <asm/proto.h>
++#include <asm/sections.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++#endif
 +
-+/* --------------------------------------------------------------------------
-+                            ACPI-based MP Configuration
-+   -------------------------------------------------------------------------- */
++char x86_boot_params[BOOT_PARAM_SIZE] __initdata;
 +
-+#ifdef CONFIG_ACPI
++cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 +
-+void __init mp_register_lapic_address (
-+	u64			address)
-+{
-+#ifndef CONFIG_XEN
-+	mp_lapic_addr = (unsigned long) address;
++struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
++EXPORT_SYMBOL(_cpu_pda);
++struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
 +
-+	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
++#ifndef CONFIG_X86_NO_IDT
++struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
++#endif
 +
-+	if (boot_cpu_id == -1U)
-+		boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
++char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
 +
-+	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
-+#endif
-+}
++unsigned long __supported_pte_mask __read_mostly = ~0UL;
++EXPORT_SYMBOL(__supported_pte_mask);
++static int do_not_nx __cpuinitdata = 0;
 +
++/* noexec=on|off
++Control non executable mappings for 64bit processes.
 +
-+void __cpuinit mp_register_lapic (
-+	u8			id, 
-+	u8			enabled)
++on	Enable(default)
++off	Disable
++*/ 
++static int __init nonx_setup(char *str)
 +{
-+	struct mpc_config_processor processor;
-+	int			boot_cpu = 0;
-+	
-+	if (id >= MAX_APICS) {
-+		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
-+			id, MAX_APICS);
-+		return;
-+	}
++	if (!str)
++		return -EINVAL;
++	if (!strncmp(str, "on", 2)) {
++                __supported_pte_mask |= _PAGE_NX; 
++ 		do_not_nx = 0; 
++	} else if (!strncmp(str, "off", 3)) {
++		do_not_nx = 1;
++		__supported_pte_mask &= ~_PAGE_NX;
++        }
++	return 0;
++} 
++early_param("noexec", nonx_setup);
 +
-+	if (id == boot_cpu_physical_apicid)
-+		boot_cpu = 1;
++int force_personality32 = 0; 
 +
-+#ifndef CONFIG_XEN
-+	processor.mpc_type = MP_PROCESSOR;
-+	processor.mpc_apicid = id;
-+	processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
-+	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
-+	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
-+	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 
-+		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+	processor.mpc_reserved[0] = 0;
-+	processor.mpc_reserved[1] = 0;
-+#endif
++/* noexec32=on|off
++Control non executable heap for 32bit processes.
++To control the stack too use noexec=off
 +
-+	MP_processor_info(&processor);
++on	PROT_READ does not imply PROT_EXEC for 32bit processes
++off	PROT_READ implies PROT_EXEC (default)
++*/
++static int __init nonx32_setup(char *str)
++{
++	if (!strcmp(str, "on"))
++		force_personality32 &= ~READ_IMPLIES_EXEC;
++	else if (!strcmp(str, "off"))
++		force_personality32 |= READ_IMPLIES_EXEC;
++	return 1;
 +}
++__setup("noexec32=", nonx32_setup);
 +
-+#ifdef CONFIG_X86_IO_APIC
++/*
++ * Great future plan:
++ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
++ * Always point %gs to its beginning
++ */
++void __init setup_per_cpu_areas(void)
++{ 
++	int i;
++	unsigned long size;
 +
-+#define MP_ISA_BUS		0
-+#define MP_MAX_IOAPIC_PIN	127
++#ifdef CONFIG_HOTPLUG_CPU
++	prefill_possible_map();
++#endif
 +
-+static struct mp_ioapic_routing {
-+	int			apic_id;
-+	int			gsi_start;
-+	int			gsi_end;
-+	u32			pin_programmed[4];
-+} mp_ioapic_routing[MAX_IO_APICS];
-+
-+
-+static int mp_find_ioapic (
-+	int			gsi)
-+{
-+	int			i = 0;
-+
-+	/* Find the IOAPIC that manages this GSI. */
-+	for (i = 0; i < nr_ioapics; i++) {
-+		if ((gsi >= mp_ioapic_routing[i].gsi_start)
-+			&& (gsi <= mp_ioapic_routing[i].gsi_end))
-+			return i;
-+	}
++	/* Copy section for each CPU (we discard the original) */
++	size = PERCPU_ENOUGH_ROOM;
 +
-+	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
++	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
++	for_each_cpu_mask (i, cpu_possible_map) {
++		char *ptr;
 +
-+	return -1;
++		if (!NODE_DATA(cpu_to_node(i))) {
++			printk("cpu with no node %d, num_online_nodes %d\n",
++			       i, num_online_nodes());
++			ptr = alloc_bootmem(size);
++		} else { 
++			ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
++		}
++		if (!ptr)
++			panic("Cannot allocate cpu data for CPU %d\n", i);
++		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
++		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
++	}
++} 
++
++#ifdef CONFIG_XEN
++static void switch_pt(void)
++{
++	xen_pt_switch(__pa(init_level4_pgt));
++        xen_new_user_pt(__pa(init_level4_user_pgt));
 +}
-+	
 +
-+void __init mp_register_ioapic (
-+	u8			id, 
-+	u32			address,
-+	u32			gsi_base)
++void __cpuinit cpu_gdt_init(struct desc_ptr *gdt_descr)
 +{
-+	int			idx = 0;
++	unsigned long frames[16];
++	unsigned long va;
++	int f;
 +
-+	if (nr_ioapics >= MAX_IO_APICS) {
-+		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
-+			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
-+		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
-+	}
-+	if (!address) {
-+		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
-+			" found in MADT table, skipping!\n");
-+		return;
++	for (va = gdt_descr->address, f = 0;
++	     va < gdt_descr->address + gdt_descr->size;
++	     va += PAGE_SIZE, f++) {
++		frames[f] = virt_to_mfn(va);
++		make_page_readonly(
++			(void *)va, XENFEAT_writable_descriptor_tables);
 +	}
++	if (HYPERVISOR_set_gdt(frames, gdt_descr->size /
++                               sizeof (struct desc_struct)))
++		BUG();
++}
++#else
++static void switch_pt(void)
++{
++	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
++}
 +
-+	idx = nr_ioapics++;
++void __init cpu_gdt_init(struct desc_ptr *gdt_descr)
++{
++	asm volatile("lgdt %0" :: "m" (*gdt_descr));
++	asm volatile("lidt %0" :: "m" (idt_descr));
++}
++#endif
 +
-+	mp_ioapics[idx].mpc_type = MP_IOAPIC;
-+	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
-+	mp_ioapics[idx].mpc_apicaddr = address;
++void pda_init(int cpu)
++{ 
++	struct x8664_pda *pda = cpu_pda(cpu);
 +
++	/* Setup up data that may be needed in __get_free_pages early */
++	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0)); 
 +#ifndef CONFIG_XEN
-+	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++	/* Memory clobbers used to order PDA accessed */
++	mb();
++	wrmsrl(MSR_GS_BASE, pda);
++	mb();
++#else
++	HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (unsigned long)pda);
 +#endif
-+	mp_ioapics[idx].mpc_apicid = id;
-+	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
-+	
-+	/* 
-+	 * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
-+	 * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
-+	 */
-+	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
-+	mp_ioapic_routing[idx].gsi_start = gsi_base;
-+	mp_ioapic_routing[idx].gsi_end = gsi_base + 
-+		io_apic_get_redir_entries(idx);
-+
-+	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
-+		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 
-+		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
-+		mp_ioapic_routing[idx].gsi_start,
-+		mp_ioapic_routing[idx].gsi_end);
-+
-+	return;
-+}
-+
-+
-+void __init mp_override_legacy_irq (
-+	u8			bus_irq,
-+	u8			polarity, 
-+	u8			trigger, 
-+	u32			gsi)
-+{
-+	struct mpc_config_intsrc intsrc;
-+	int			ioapic = -1;
-+	int			pin = -1;
-+
-+	/* 
-+	 * Convert 'gsi' to 'ioapic.pin'.
-+	 */
-+	ioapic = mp_find_ioapic(gsi);
-+	if (ioapic < 0)
-+		return;
-+	pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
-+
-+	/*
-+	 * TBD: This check is for faulty timer entries, where the override
-+	 *      erroneously sets the trigger to level, resulting in a HUGE 
-+	 *      increase of timer interrupts!
-+	 */
-+	if ((bus_irq == 0) && (trigger == 3))
-+		trigger = 1;
-+
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqtype = mp_INT;
-+	intsrc.mpc_irqflag = (trigger << 2) | polarity;
-+	intsrc.mpc_srcbus = MP_ISA_BUS;
-+	intsrc.mpc_srcbusirq = bus_irq;				       /* IRQ */
-+	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	   /* APIC ID */
-+	intsrc.mpc_dstirq = pin;				    /* INTIN# */
++	pda->cpunumber = cpu; 
++	pda->irqcount = -1;
++	pda->kernelstack = 
++		(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE; 
++	pda->active_mm = &init_mm;
++	pda->mmu_state = 0;
 +
-+	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", 
-+		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-+		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-+		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
++	if (cpu == 0) {
++#ifdef CONFIG_XEN
++		xen_init_pt();
++#endif
++		/* others are initialized in smpboot.c */
++		pda->pcurrent = &init_task;
++		pda->irqstackptr = boot_cpu_stack; 
++	} else {
++		pda->irqstackptr = (char *)
++			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
++		if (!pda->irqstackptr)
++			panic("cannot allocate irqstack for cpu %d", cpu); 
++	}
 +
-+	mp_irqs[mp_irq_entries] = intsrc;
-+	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+		panic("Max # of irq sources exceeded!\n");
++	switch_pt();
 +
-+	return;
-+}
++	pda->irqstackptr += IRQSTACKSIZE-64;
++} 
 +
++#ifndef CONFIG_X86_NO_TSS
++char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
++__attribute__((section(".bss.page_aligned")));
++#endif
 +
-+void __init mp_config_acpi_legacy_irqs (void)
++/* May not be marked __init: used by software suspend */
++void syscall_init(void)
 +{
-+	struct mpc_config_intsrc intsrc;
-+	int			i = 0;
-+	int			ioapic = -1;
-+
-+	/* 
-+	 * Fabricate the legacy ISA bus (bus #31).
-+	 */
-+	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
-+	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
-+
++#ifndef CONFIG_XEN
 +	/* 
-+	 * Locate the IOAPIC that manages the ISA IRQs (0-15). 
-+	 */
-+	ioapic = mp_find_ioapic(0);
-+	if (ioapic < 0)
-+		return;
++	 * LSTAR and STAR live in a bit strange symbiosis.
++	 * They both write to the same internal register. STAR allows to set CS/DS
++	 * but only a 32bit target. LSTAR sets the 64bit rip. 	 
++	 */ 
++	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32); 
++	wrmsrl(MSR_LSTAR, system_call); 
 +
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqflag = 0;					/* Conforming */
-+	intsrc.mpc_srcbus = MP_ISA_BUS;
-+	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++	/* Flags to clear on syscall */
++	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000); 
++#endif
++#ifdef CONFIG_IA32_EMULATION   		
++	syscall32_cpu_init ();
++#endif
++}
 +
-+	/* 
-+	 * Use the default configuration for the IRQs 0-15.  Unless
-+	 * overridden by (MADT) interrupt source override entries.
-+	 */
-+	for (i = 0; i < 16; i++) {
-+		int idx;
++void __cpuinit check_efer(void)
++{
++	unsigned long efer;
 +
-+		for (idx = 0; idx < mp_irq_entries; idx++) {
-+			struct mpc_config_intsrc *irq = mp_irqs + idx;
++	rdmsrl(MSR_EFER, efer); 
++        if (!(efer & EFER_NX) || do_not_nx) { 
++                __supported_pte_mask &= ~_PAGE_NX; 
++        }       
++}
 +
-+			/* Do we already have a mapping for this ISA IRQ? */
-+			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
-+				break;
++unsigned long kernel_eflags;
 +
-+			/* Do we already have a mapping for this IOAPIC pin */
-+			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
-+				(irq->mpc_dstirq == i))
-+				break;
-+		}
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ * A lot of state is already set up in PDA init.
++ */
++void __cpuinit cpu_init (void)
++{
++	int cpu = stack_smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++	struct tss_struct *t = &per_cpu(init_tss, cpu);
++	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
++	unsigned long v; 
++	char *estacks = NULL; 
++	int i;
++#endif
++	struct task_struct *me;
 +
-+		if (idx != mp_irq_entries) {
-+			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
-+			continue;			/* IRQ already used */
-+		}
-+
-+		intsrc.mpc_irqtype = mp_INT;
-+		intsrc.mpc_srcbusirq = i;		   /* Identity mapped */
-+		intsrc.mpc_dstirq = i;
-+
-+		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
-+			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-+			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-+			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, 
-+			intsrc.mpc_dstirq);
-+
-+		mp_irqs[mp_irq_entries] = intsrc;
-+		if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+			panic("Max # of irq sources exceeded!\n");
++	/* CPU 0 is initialised in head64.c */
++	if (cpu != 0) {
++		pda_init(cpu);
++		zap_low_mappings(cpu);
 +	}
++#ifndef CONFIG_X86_NO_TSS
++	else
++		estacks = boot_exception_stacks; 
++#endif
 +
-+	return;
-+}
++	me = current;
 +
-+#define MAX_GSI_NUM	4096
++	if (cpu_test_and_set(cpu, cpu_initialized))
++		panic("CPU#%d already initialized!\n", cpu);
 +
-+int mp_register_gsi(u32 gsi, int triggering, int polarity)
-+{
-+	int			ioapic = -1;
-+	int			ioapic_pin = 0;
-+	int			idx, bit = 0;
-+	static int		pci_irq = 16;
-+	/*
-+	 * Mapping between Global System Interrupts, which
-+	 * represent all possible interrupts, to the IRQs
-+	 * assigned to actual devices.
-+	 */
-+	static int		gsi_to_irq[MAX_GSI_NUM];
++	printk("Initializing CPU#%d\n", cpu);
 +
-+	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
-+		return gsi;
++	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 +
-+	/* Don't set up the ACPI SCI because it's already set up */
-+	if (acpi_fadt.sci_int == gsi)
-+		return gsi;
++	/*
++	 * Initialize the per-CPU GDT with the boot GDT,
++	 * and set up the GDT descriptor:
++	 */
++#ifndef CONFIG_XEN 
++	if (cpu)
++ 		memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
++#endif
 +
-+	ioapic = mp_find_ioapic(gsi);
-+	if (ioapic < 0) {
-+		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
-+		return gsi;
-+	}
++	cpu_gdt_descr[cpu].size = GDT_SIZE;
++	cpu_gdt_init(&cpu_gdt_descr[cpu]);
 +
-+	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
++	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
++	syscall_init();
 +
-+	/* 
-+	 * Avoid pin reprogramming.  PRTs typically include entries  
-+	 * with redundant pin->gsi mappings (but unique PCI devices);
-+	 * we only program the IOAPIC on the first.
-+	 */
-+	bit = ioapic_pin % 32;
-+	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
-+	if (idx > 3) {
-+		printk(KERN_ERR "Invalid reference to IOAPIC pin "
-+			"%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 
-+			ioapic_pin);
-+		return gsi;
-+	}
-+	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
-+		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
-+			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-+		return gsi_to_irq[gsi];
-+	}
++	wrmsrl(MSR_FS_BASE, 0);
++	wrmsrl(MSR_KERNEL_GS_BASE, 0);
++	barrier(); 
 +
-+	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++	check_efer();
 +
-+	if (triggering == ACPI_LEVEL_SENSITIVE) {
-+		/*
-+		 * For PCI devices assign IRQs in order, avoiding gaps
-+		 * due to unused I/O APIC pins.
-+		 */
-+		int irq = gsi;
-+		if (gsi < MAX_GSI_NUM) {
-+			/*
-+			 * Retain the VIA chipset work-around (gsi > 15), but
-+			 * avoid a problem where the 8254 timer (IRQ0) is setup
-+			 * via an override (so it's not on pin 0 of the ioapic),
-+			 * and at the same time, the pin 0 interrupt is a PCI
-+			 * type.  The gsi > 15 test could cause these two pins
-+			 * to be shared as IRQ0, and they are not shareable.
-+			 * So test for this condition, and if necessary, avoid
-+			 * the pin collision.
-+			 */
-+			if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
-+				gsi = pci_irq++;
-+			/*
-+			 * Don't assign IRQ used by ACPI SCI
-+			 */
-+			if (gsi == acpi_fadt.sci_int)
-+				gsi = pci_irq++;
-+			gsi_to_irq[irq] = gsi;
-+		} else {
-+			printk(KERN_ERR "GSI %u is too high\n", gsi);
-+			return gsi;
++#ifndef CONFIG_X86_NO_TSS
++	/*
++	 * set up and load the per-CPU TSS
++	 */
++	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
++		static const unsigned int order[N_EXCEPTION_STACKS] = {
++			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
++			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
++		};
++		if (cpu) {
++			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
++			if (!estacks)
++				panic("Cannot allocate exception stack %ld %d\n",
++				      v, cpu); 
 +		}
++		estacks += PAGE_SIZE << order[v];
++		orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
 +	}
 +
-+	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-+		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
-+		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-+	return gsi;
-+}
-+
-+#endif /*CONFIG_X86_IO_APIC*/
-+#endif /*CONFIG_ACPI*/
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/pci-swiotlb-xen.c linux-2.6.18-xen/arch/x86_64/kernel/pci-swiotlb-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/pci-swiotlb-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/pci-swiotlb-xen.c	2006-11-19 14:26:33.000000000 +0100
-@@ -0,0 +1,54 @@
-+/* Glue code to lib/swiotlb.c */
++	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
++	/*
++	 * <= is required because the CPU will access up to
++	 * 8 bits beyond the end of the IO permission bitmap.
++	 */
++	for (i = 0; i <= IO_BITMAP_LONGS; i++)
++		t->io_bitmap[i] = ~0UL;
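++	/*
++	 * A set bit in the TSS I/O bitmap makes the CPU trap the access,
++	 * so filling it with ~0UL denies user space all port access until
++	 * ioperm()/iopl() explicitly grants some.
++	 */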
++#endif
 +
-+#include <linux/pci.h>
-+#include <linux/cache.h>
-+#include <linux/module.h>
-+#include <asm/dma-mapping.h>
-+#include <asm/proto.h>
-+#include <asm/swiotlb.h>
-+#include <asm/dma.h>
++	atomic_inc(&init_mm.mm_count);
++	me->active_mm = &init_mm;
++	if (me->mm)
++		BUG();
++	enter_lazy_tlb(&init_mm, me);
 +
-+#if 0
-+int swiotlb __read_mostly;
-+EXPORT_SYMBOL(swiotlb);
++#ifndef CONFIG_X86_NO_TSS
++	set_tss_desc(cpu, t);
 +#endif
-+
-+struct dma_mapping_ops swiotlb_dma_ops = {
-+#if 0
-+	.mapping_error = swiotlb_dma_mapping_error,
-+	.alloc_coherent = swiotlb_alloc_coherent,
-+	.free_coherent = swiotlb_free_coherent,
-+	.map_single = swiotlb_map_single,
-+	.unmap_single = swiotlb_unmap_single,
-+	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-+	.sync_single_for_device = swiotlb_sync_single_for_device,
-+	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-+	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
-+	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-+	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-+	.map_sg = swiotlb_map_sg,
-+	.unmap_sg = swiotlb_unmap_sg,
-+	.dma_supported = NULL,
++#ifndef CONFIG_XEN
++	load_TR_desc();
 +#endif
-+};
++	load_LDT(&init_mm.context);
 +
-+void pci_swiotlb_init(void)
-+{
-+#if 0
-+	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
-+	if (!iommu_aperture && !no_iommu &&
-+	    (end_pfn > MAX_DMA32_PFN || force_iommu))
-+	       swiotlb = 1;
-+	if (swiotlb) {
-+		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
-+		swiotlb_init();
-+		dma_ops = &swiotlb_dma_ops;
-+	}
-+#else
-+	swiotlb_init();
-+	if (swiotlb) {
-+		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
-+		dma_ops = &swiotlb_dma_ops;
-+	}
-+#endif
++	/*
++	 * Clear all 6 debug registers:
++	 */
++
++	set_debugreg(0UL, 0);
++	set_debugreg(0UL, 1);
++	set_debugreg(0UL, 2);
++	set_debugreg(0UL, 3);
++	set_debugreg(0UL, 6);
++	set_debugreg(0UL, 7);
++
++	fpu_init(); 
++
++	raw_local_save_flags(kernel_eflags);
 +}
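++/*
++ * Under Xen, cpu_init() mainly differs from bare metal in what it may not
++ * touch directly: the GDT frames are handed to the hypervisor through
++ * cpu_gdt_init(), no task register is loaded when CONFIG_XEN is set, and
++ * the FS/GS base MSRs are cleared rather than set up via descriptors.
++ */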
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/process-xen.c linux-2.6.18-xen/arch/x86_64/kernel/process-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/process-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/process-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,787 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/setup-xen.c b/arch/x86_64/kernel/setup-xen.c
+--- a/arch/x86_64/kernel/setup-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/setup-xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,1478 @@
 +/*
-+ *  linux/arch/x86-64/kernel/process.c
++ *  linux/arch/x86-64/kernel/setup.c
 + *
 + *  Copyright (C) 1995  Linus Torvalds
 + *
-+ *  Pentium III FXSR, SSE support
-+ *	Gareth Hughes <gareth at valinux.com>, May 2000
-+ * 
-+ *  X86-64 port
-+ *	Andi Kleen.
-+ *
-+ *	CPU hotplug support - ashok.raj at intel.com
-+ * 
-+ *  Jun Nakajima <jun.nakajima at intel.com> 
-+ *     Modified for Xen
++ *  Nov 2001 Dave Jones <davej at suse.de>
++ *  Forked from i386 setup code.
 + */
 +
 +/*
-+ * This file handles the architecture-dependent parts of process handling..
++ * This file handles the architecture-dependent parts of initialization
 + */
 +
-+#include <stdarg.h>
-+
-+#include <linux/cpu.h>
 +#include <linux/errno.h>
 +#include <linux/sched.h>
 +#include <linux/kernel.h>
 +#include <linux/mm.h>
-+#include <linux/elfcore.h>
-+#include <linux/smp.h>
++#include <linux/stddef.h>
++#include <linux/unistd.h>
++#include <linux/ptrace.h>
 +#include <linux/slab.h>
 +#include <linux/user.h>
-+#include <linux/module.h>
 +#include <linux/a.out.h>
-+#include <linux/interrupt.h>
++#include <linux/screen_info.h>
++#include <linux/ioport.h>
 +#include <linux/delay.h>
-+#include <linux/ptrace.h>
-+#include <linux/utsname.h>
-+#include <linux/random.h>
-+#include <linux/notifier.h>
-+#include <linux/kprobes.h>
++#include <linux/init.h>
++#include <linux/initrd.h>
++#include <linux/highmem.h>
++#include <linux/bootmem.h>
++#include <linux/module.h>
++#include <asm/processor.h>
++#include <linux/console.h>
++#include <linux/seq_file.h>
++#include <linux/crash_dump.h>
++#include <linux/root_dev.h>
++#include <linux/pci.h>
++#include <linux/acpi.h>
++#include <linux/kallsyms.h>
++#include <linux/edd.h>
++#include <linux/mmzone.h>
++#include <linux/kexec.h>
++#include <linux/cpufreq.h>
++#include <linux/dmi.h>
++#include <linux/dma-mapping.h>
++#include <linux/ctype.h>
 +
++#include <asm/mtrr.h>
 +#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
 +#include <asm/system.h>
 +#include <asm/io.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/mmu_context.h>
-+#include <asm/pda.h>
-+#include <asm/prctl.h>
-+#include <asm/kdebug.h>
-+#include <xen/interface/dom0_ops.h>
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/vcpu.h>
++#include <asm/smp.h>
++#include <asm/msr.h>
 +#include <asm/desc.h>
++#include <video/edid.h>
++#include <asm/e820.h>
++#include <asm/dma.h>
++#include <asm/mpspec.h>
++#include <asm/mmu_context.h>
++#include <asm/bootsetup.h>
 +#include <asm/proto.h>
-+#include <asm/hardirq.h>
-+#include <asm/ia32.h>
-+#include <asm/idle.h>
-+
-+#include <xen/cpu_hotplug.h>
-+
-+asmlinkage extern void ret_from_fork(void);
-+
-+unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
-+
-+unsigned long boot_option_idle_override = 0;
-+EXPORT_SYMBOL(boot_option_idle_override);
-+
-+/*
-+ * Powermanagement idle function, if any..
-+ */
-+void (*pm_idle)(void);
-+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-+
-+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
++#include <asm/setup.h>
++#include <asm/mach_apic.h>
++#include <asm/numa.h>
++#include <asm/sections.h>
++#include <asm/dmi.h>
++#ifdef CONFIG_XEN
++#include <linux/percpu.h>
++#include <linux/pfn.h>
++#include <xen/interface/physdev.h>
++#include "setup_arch_pre.h"
++#include <asm/hypervisor.h>
++#include <xen/interface/nmi.h>
++#include <xen/features.h>
++#include <xen/xencons.h>
++#include <asm/mach-xen/setup_arch_post.h>
++#include <xen/interface/memory.h>
 +
-+void idle_notifier_register(struct notifier_block *n)
-+{
-+	atomic_notifier_chain_register(&idle_notifier, n);
-+}
-+EXPORT_SYMBOL_GPL(idle_notifier_register);
++extern unsigned long start_pfn;
 +
-+void idle_notifier_unregister(struct notifier_block *n)
-+{
-+	atomic_notifier_chain_unregister(&idle_notifier, n);
-+}
-+EXPORT_SYMBOL(idle_notifier_unregister);
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
 +
-+enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
-+static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
 +
-+void enter_idle(void)
-+{
-+	__get_cpu_var(idle_state) = CPU_IDLE;
-+	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-+}
++/* Allows setting of maximum possible memory size  */
++unsigned long xen_override_max_pfn;
 +
-+static void __exit_idle(void)
-+{
-+	__get_cpu_var(idle_state) = CPU_NOT_IDLE;
-+	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-+}
++static int xen_panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block xen_panic_block = {
++	xen_panic_event, NULL, 0 /* try to go last */
++};
 +
-+/* Called from interrupts to signify idle end */
-+void exit_idle(void)
-+{
-+	if (current->pid | read_pda(irqcount))
-+		return;
-+	__exit_idle();
-+}
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
 +
-+/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
-+void xen_idle(void)
-+{
-+	local_irq_disable();
++EXPORT_SYMBOL(phys_to_machine_mapping);
 +
-+	if (need_resched())
-+		local_irq_enable();
-+	else {
-+		current_thread_info()->status &= ~TS_POLLING;
-+		smp_mb__after_clear_bit();
-+		safe_halt();
-+		current_thread_info()->status |= TS_POLLING;
-+	}
-+}
++DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
++DEFINE_PER_CPU(int, nr_multicall_ents);
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+static inline void play_dead(void)
-+{
-+	idle_task_exit();
-+	local_irq_disable();
-+	cpu_clear(smp_processor_id(), cpu_initialized);
-+	preempt_enable_no_resched();
-+	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-+	cpu_bringup();
-+}
-+#else
-+static inline void play_dead(void)
-+{
-+	BUG();
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++#endif
 +
 +/*
-+ * The idle thread. There's no useful work to be
-+ * done, so just try to conserve power and have a
-+ * low exit latency (ie sit in a loop waiting for
-+ * somebody to say that they'd like to reschedule)
++ * Machine setup..
 + */
-+void cpu_idle (void)
-+{
-+	current_thread_info()->status |= TS_POLLING;
-+	/* endless idle loop with no priority at all */
-+	while (1) {
-+		while (!need_resched()) {
-+			if (__get_cpu_var(cpu_idle_state))
-+				__get_cpu_var(cpu_idle_state) = 0;
-+			rmb();
-+			
-+			if (cpu_is_offline(smp_processor_id()))
-+				play_dead();
-+			enter_idle();
-+			xen_idle();
-+			__exit_idle();
-+		}
-+
-+		preempt_enable_no_resched();
-+		schedule();
-+		preempt_disable();
-+	}
-+}
-+
-+void cpu_idle_wait(void)
-+{
-+	unsigned int cpu, this_cpu = get_cpu();
-+	cpumask_t map;
-+
-+	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-+	put_cpu();
-+
-+ 	cpus_clear(map);
-+	for_each_online_cpu(cpu) {
-+		per_cpu(cpu_idle_state, cpu) = 1;
-+		cpu_set(cpu, map);
-+	}
-+
-+	__get_cpu_var(cpu_idle_state) = 0;
-+
-+	wmb();
-+	do {
-+		ssleep(1);
-+		for_each_online_cpu(cpu) {
-+			if (cpu_isset(cpu, map) &&
-+					!per_cpu(cpu_idle_state, cpu))
-+				cpu_clear(cpu, map);
-+		}
-+		cpus_and(map, map, cpu_online_map);
-+	} while (!cpus_empty(map));
-+}
-+EXPORT_SYMBOL_GPL(cpu_idle_wait);
-+
-+/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
-+/* Always use xen_idle() instead. */
-+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) {}
-+
-+/* Prints also some state that isn't saved in the pt_regs */ 
-+void __show_regs(struct pt_regs * regs)
-+{
-+	unsigned long fs, gs, shadowgs;
-+	unsigned int fsindex,gsindex;
-+	unsigned int ds,cs,es; 
-+
-+	printk("\n");
-+	print_modules();
-+	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
-+		current->pid, current->comm, print_tainted(),
-+		system_utsname.release,
-+		(int)strcspn(system_utsname.version, " "),
-+		system_utsname.version);
-+	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
-+	printk_address(regs->rip); 
-+	printk("RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->rsp,
-+		regs->eflags);
-+	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
-+	       regs->rax, regs->rbx, regs->rcx);
-+	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
-+	       regs->rdx, regs->rsi, regs->rdi); 
-+	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
-+	       regs->rbp, regs->r8, regs->r9); 
-+	printk("R10: %016lx R11: %016lx R12: %016lx\n",
-+	       regs->r10, regs->r11, regs->r12); 
-+	printk("R13: %016lx R14: %016lx R15: %016lx\n",
-+	       regs->r13, regs->r14, regs->r15); 
 +
-+	asm("mov %%ds,%0" : "=r" (ds)); 
-+	asm("mov %%cs,%0" : "=r" (cs)); 
-+	asm("mov %%es,%0" : "=r" (es)); 
-+	asm("mov %%fs,%0" : "=r" (fsindex));
-+	asm("mov %%gs,%0" : "=r" (gsindex));
++struct cpuinfo_x86 boot_cpu_data __read_mostly;
++EXPORT_SYMBOL(boot_cpu_data);
 +
-+	rdmsrl(MSR_FS_BASE, fs);
-+	rdmsrl(MSR_GS_BASE, gs); 
-+	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 
++unsigned long mmu_cr4_features;
 +
-+	printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 
-+	       fs,fsindex,gs,gsindex,shadowgs); 
-+	printk("CS:  %04x DS: %04x ES: %04x\n", cs, ds, es); 
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
 +
-+}
++unsigned long saved_video_mode;
 +
-+void show_regs(struct pt_regs *regs)
-+{
-+	printk("CPU %d:", smp_processor_id());
-+	__show_regs(regs);
-+	show_trace(NULL, regs, &regs->rsp);
-+}
++/* 
++ * Early DMI memory
++ */
++int dmi_alloc_index;
++char dmi_alloc_data[DMI_MAX_DATA];
 +
 +/*
-+ * Free current thread data structures etc..
++ * Setup options
 + */
-+void exit_thread(void)
-+{
-+	struct task_struct *me = current;
-+	struct thread_struct *t = &me->thread;
-+
-+	if (me->thread.io_bitmap_ptr) { 
-+#ifndef CONFIG_X86_NO_TSS
-+		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
-+#endif
-+#ifdef CONFIG_XEN
-+		struct physdev_set_iobitmap iobmp_op = { 0 };
-+#endif
++struct screen_info screen_info;
++EXPORT_SYMBOL(screen_info);
++struct sys_desc_table_struct {
++	unsigned short length;
++	unsigned char table[0];
++};
 +
-+		kfree(t->io_bitmap_ptr);
-+		t->io_bitmap_ptr = NULL;
-+		/*
-+		 * Careful, clear this in the TSS too:
-+		 */
-+#ifndef CONFIG_X86_NO_TSS
-+		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
-+		put_cpu();
-+#endif
++struct edid_info edid_info;
++EXPORT_SYMBOL_GPL(edid_info);
 +#ifdef CONFIG_XEN
-+		HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobmp_op);
++struct e820map machine_e820;
 +#endif
-+		t->io_bitmap_max = 0;
-+	}
-+}
-+
-+void load_gs_index(unsigned gs)
-+{
-+	HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs);
-+}
-+
-+void flush_thread(void)
-+{
-+	struct task_struct *tsk = current;
-+	struct thread_info *t = current_thread_info();
-+
-+	if (t->flags & _TIF_ABI_PENDING) {
-+		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
-+		if (t->flags & _TIF_IA32)
-+			current_thread_info()->status |= TS_COMPAT;
-+	}
-+
-+
-+	tsk->thread.debugreg0 = 0;
-+	tsk->thread.debugreg1 = 0;
-+	tsk->thread.debugreg2 = 0;
-+	tsk->thread.debugreg3 = 0;
-+	tsk->thread.debugreg6 = 0;
-+	tsk->thread.debugreg7 = 0;
-+	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
-+	/*
-+	 * Forget coprocessor state..
-+	 */
-+	clear_fpu(tsk);
-+	clear_used_math();
-+}
 +
-+void release_thread(struct task_struct *dead_task)
-+{
-+	if (dead_task->mm) {
-+		if (dead_task->mm->context.size) {
-+			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
-+					dead_task->comm,
-+					dead_task->mm->context.ldt,
-+					dead_task->mm->context.size);
-+			BUG();
-+		}
-+	}
-+}
++extern int root_mountflags;
 +
-+static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
-+{
-+	struct user_desc ud = { 
-+		.base_addr = addr,
-+		.limit = 0xfffff,
-+		.seg_32bit = 1,
-+		.limit_in_pages = 1,
-+		.useable = 1,
-+	};
-+	struct n_desc_struct *desc = (void *)t->thread.tls_array;
-+	desc += tls;
-+	desc->a = LDT_entry_a(&ud); 
-+	desc->b = LDT_entry_b(&ud); 
-+}
++char command_line[COMMAND_LINE_SIZE];
 +
-+static inline u32 read_32bit_tls(struct task_struct *t, int tls)
-+{
-+	struct desc_struct *desc = (void *)t->thread.tls_array;
-+	desc += tls;
-+	return desc->base0 | 
-+		(((u32)desc->base1) << 16) | 
-+		(((u32)desc->base2) << 24);
-+}
++struct resource standard_io_resources[] = {
++	{ .name = "dma1", .start = 0x00, .end = 0x1f,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "pic1", .start = 0x20, .end = 0x21,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "timer0", .start = 0x40, .end = 0x43,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "timer1", .start = 0x50, .end = 0x53,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "fpu", .start = 0xf0, .end = 0xff,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
++};
 +
-+/*
-+ * This gets called before we allocate a new thread and copy
-+ * the current task into it.
-+ */
-+void prepare_to_copy(struct task_struct *tsk)
-+{
-+	unlazy_fpu(tsk);
-+}
++#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
 +
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, 
-+		unsigned long unused,
-+	struct task_struct * p, struct pt_regs * regs)
-+{
-+	int err;
-+	struct pt_regs * childregs;
-+	struct task_struct *me = current;
++struct resource data_resource = {
++	.name = "Kernel data",
++	.start = 0,
++	.end = 0,
++	.flags = IORESOURCE_RAM,
++};
++struct resource code_resource = {
++	.name = "Kernel code",
++	.start = 0,
++	.end = 0,
++	.flags = IORESOURCE_RAM,
++};
 +
-+	childregs = ((struct pt_regs *)
-+			(THREAD_SIZE + task_stack_page(p))) - 1;
-+	*childregs = *regs;
++#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
 +
-+	childregs->rax = 0;
-+	childregs->rsp = rsp;
-+	if (rsp == ~0UL)
-+		childregs->rsp = (unsigned long)childregs;
++static struct resource system_rom_resource = {
++	.name = "System ROM",
++	.start = 0xf0000,
++	.end = 0xfffff,
++	.flags = IORESOURCE_ROM,
++};
 +
-+	p->thread.rsp = (unsigned long) childregs;
-+	p->thread.rsp0 = (unsigned long) (childregs+1);
-+	p->thread.userrsp = me->thread.userrsp; 
++static struct resource extension_rom_resource = {
++	.name = "Extension ROM",
++	.start = 0xe0000,
++	.end = 0xeffff,
++	.flags = IORESOURCE_ROM,
++};
 +
-+	set_tsk_thread_flag(p, TIF_FORK);
++static struct resource adapter_rom_resources[] = {
++	{ .name = "Adapter ROM", .start = 0xc8000, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM }
++};
 +
-+	p->thread.fs = me->thread.fs;
-+	p->thread.gs = me->thread.gs;
++static struct resource video_rom_resource = {
++	.name = "Video ROM",
++	.start = 0xc0000,
++	.end = 0xc7fff,
++	.flags = IORESOURCE_ROM,
++};
 +
-+	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-+	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-+	asm("mov %%es,%0" : "=m" (p->thread.es));
-+	asm("mov %%ds,%0" : "=m" (p->thread.ds));
++static struct resource video_ram_resource = {
++	.name = "Video RAM area",
++	.start = 0xa0000,
++	.end = 0xbffff,
++	.flags = IORESOURCE_RAM,
++};
 +
-+	if (unlikely(me->thread.io_bitmap_ptr != NULL)) { 
-+		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+		if (!p->thread.io_bitmap_ptr) {
-+			p->thread.io_bitmap_max = 0;
-+			return -ENOMEM;
-+		}
-+		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
-+				IO_BITMAP_BYTES);
-+	} 
++#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
 +
-+	/*
-+	 * Set a new TLS for the child thread?
-+	 */
-+	if (clone_flags & CLONE_SETTLS) {
-+#ifdef CONFIG_IA32_EMULATION
-+		if (test_thread_flag(TIF_IA32))
-+			err = ia32_child_tls(p, childregs); 
-+		else 			
-+#endif	 
-+			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 
-+		if (err) 
-+			goto out;
-+	}
-+        p->thread.iopl = current->thread.iopl;
++static int __init romchecksum(unsigned char *rom, unsigned long length)
++{
++	unsigned char *p, sum = 0;
 +
-+	err = 0;
-+out:
-+	if (err && p->thread.io_bitmap_ptr) {
-+		kfree(p->thread.io_bitmap_ptr);
-+		p->thread.io_bitmap_max = 0;
-+	}
-+	return err;
++	for (p = rom; p < rom + length; p++)
++		sum += *p;
++	return sum == 0;
 +}
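++/*
++ * A valid BIOS expansion ROM must have all of its bytes sum to zero
++ * modulo 256; romchecksum() returning false means the image is corrupt
++ * and its length byte cannot be trusted.
++ */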
 +
-+static inline void __save_init_fpu( struct task_struct *tsk )
++static void __init probe_roms(void)
 +{
-+	asm volatile( "rex64 ; fxsave %0 ; fnclex"
-+		      : "=m" (tsk->thread.i387.fxsave));
-+	tsk->thread_info->status &= ~TS_USEDFPU;
-+}
++	unsigned long start, length, upper;
++	unsigned char *rom;
++	int	      i;
 +
-+/*
-+ *	switch_to(x,y) should switch tasks from x to y.
-+ *
-+ * This could still be optimized: 
-+ * - fold all the options into a flag word and test it with a single test.
-+ * - could test fs/gs bitsliced
-+ *
-+ * Kprobes not supported here. Set the probe on schedule instead.
-+ */
-+__kprobes struct task_struct *
-+__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+	struct thread_struct *prev = &prev_p->thread,
-+				 *next = &next_p->thread;
-+	int cpu = smp_processor_id();  
-+#ifndef CONFIG_X86_NO_TSS
-+	struct tss_struct *tss = &per_cpu(init_tss, cpu);
++#ifdef CONFIG_XEN
++	/* Nothing to do if not running in dom0. */
++	if (!is_initial_xendomain())
++		return;
 +#endif
-+	struct physdev_set_iopl iopl_op;
-+	struct physdev_set_iobitmap iobmp_op;
-+	multicall_entry_t _mcl[8], *mcl = _mcl;
-+
-+	/*
-+	 * Reload esp0, LDT and the page table pointer:
-+	 */
-+	mcl->op      = __HYPERVISOR_stack_switch;
-+	mcl->args[0] = __KERNEL_DS;
-+	mcl->args[1] = next->rsp0;
-+	mcl++;
 +
-+	/*
-+	 * Load the per-thread Thread-Local Storage descriptor.
-+	 * This is load_TLS(next, cpu) with multicalls.
-+	 */
-+#define C(i) do {							\
-+	if (unlikely(next->tls_array[i] != prev->tls_array[i])) {	\
-+		mcl->op      = __HYPERVISOR_update_descriptor;		\
-+		mcl->args[0] = virt_to_machine(				\
-+			&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]);		\
-+		mcl->args[1] = next->tls_array[i];			\
-+		mcl++;							\
-+	}								\
-+} while (0)
-+	C(0); C(1); C(2);
-+#undef C
++	/* video rom */
++	upper = adapter_rom_resources[0].start;
++	for (start = video_rom_resource.start; start < upper; start += 2048) {
++		rom = isa_bus_to_virt(start);
++		if (!romsignature(rom))
++			continue;
 +
-+	if (unlikely(prev->iopl != next->iopl)) {
-+		iopl_op.iopl = (next->iopl == 0) ? 1 : next->iopl;
-+		mcl->op      = __HYPERVISOR_physdev_op;
-+		mcl->args[0] = PHYSDEVOP_set_iopl;
-+		mcl->args[1] = (unsigned long)&iopl_op;
-+		mcl++;
-+	}
++		video_rom_resource.start = start;
 +
-+	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
-+		iobmp_op.bitmap   = (char *)next->io_bitmap_ptr;
-+		iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
-+		mcl->op      = __HYPERVISOR_physdev_op;
-+		mcl->args[0] = PHYSDEVOP_set_iobitmap;
-+		mcl->args[1] = (unsigned long)&iobmp_op;
-+		mcl++;
-+	}
++		/* 0 < length <= 0x7f * 512, historically */
++		length = rom[2] * 512;
 +
-+	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
-+	/* 
-+	 * Switch DS and ES.
-+	 * This won't pick up thread selector changes, but I guess that is ok.
-+	 */
-+	if (unlikely(next->es))
-+		loadsegment(es, next->es); 
-+	
-+	if (unlikely(next->ds))
-+		loadsegment(ds, next->ds);
++		/* if checksum okay, trust length byte */
++		if (length && romchecksum(rom, length))
++			video_rom_resource.end = start + length - 1;
 +
-+	/* 
-+	 * Switch FS and GS.
-+	 */
-+	if (unlikely(next->fsindex))
-+		loadsegment(fs, next->fsindex);
++		request_resource(&iomem_resource, &video_rom_resource);
++		break;
++	}
 +
-+	if (next->fs)
-+		HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs); 
-+	
-+	if (unlikely(next->gsindex))
-+		load_gs_index(next->gsindex);
++	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++	if (start < upper)
++		start = upper;
 +
-+	if (next->gs)
-+		HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs); 
++	/* system rom */
++	request_resource(&iomem_resource, &system_rom_resource);
++	upper = system_rom_resource.start;
 +
-+	/* Must be after DS reload */
-+	if (prev_p->thread_info->status & TS_USEDFPU) {
-+		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
-+		HYPERVISOR_fpu_taskswitch(1);
++	/* check for extension rom (ignore length byte!) */
++	rom = isa_bus_to_virt(extension_rom_resource.start);
++	if (romsignature(rom)) {
++		length = extension_rom_resource.end - extension_rom_resource.start + 1;
++		if (romchecksum(rom, length)) {
++			request_resource(&iomem_resource, &extension_rom_resource);
++			upper = extension_rom_resource.start;
++		}
 +	}
 +
-+	/* 
-+	 * Switch the PDA and FPU contexts.
-+	 */
-+	prev->userrsp = read_pda(oldrsp); 
-+	write_pda(oldrsp, next->userrsp); 
-+	write_pda(pcurrent, next_p); 
-+
-+	write_pda(kernelstack,
-+		  task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
++	/* check for adapter roms on 2k boundaries */
++	for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper;
++	     start += 2048) {
++		rom = isa_bus_to_virt(start);
++		if (!romsignature(rom))
++			continue;
 +
-+	/*
-+	 * Now maybe reload the debug registers
-+	 */
-+	if (unlikely(next->debugreg7)) {
-+		set_debugreg(next->debugreg0, 0);
-+		set_debugreg(next->debugreg1, 1);
-+		set_debugreg(next->debugreg2, 2);
-+		set_debugreg(next->debugreg3, 3);
-+		/* no 4 and 5 */
-+		set_debugreg(next->debugreg6, 6);
-+		set_debugreg(next->debugreg7, 7);
-+	}
++		/* 0 < length <= 0x7f * 512, historically */
++		length = rom[2] * 512;
 +
-+	return prev_p;
-+}
++		/* but accept any length that fits if checksum okay */
++		if (!length || start + length > upper || !romchecksum(rom, length))
++			continue;
 +
-+/*
-+ * sys_execve() executes a new program.
-+ */
-+asmlinkage 
-+long sys_execve(char __user *name, char __user * __user *argv,
-+		char __user * __user *envp, struct pt_regs regs)
-+{
-+	long error;
-+	char * filename;
++		adapter_rom_resources[i].start = start;
++		adapter_rom_resources[i].end = start + length - 1;
++		request_resource(&iomem_resource, &adapter_rom_resources[i]);
 +
-+	filename = getname(name);
-+	error = PTR_ERR(filename);
-+	if (IS_ERR(filename)) 
-+		return error;
-+	error = do_execve(filename, argv, envp, &regs); 
-+	if (error == 0) {
-+		task_lock(current);
-+		current->ptrace &= ~PT_DTRACE;
-+		task_unlock(current);
++		start = adapter_rom_resources[i++].end & ~2047UL;
 +	}
-+	putname(filename);
-+	return error;
 +}
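++/*
++ * probe_roms() walks legacy ISA space on 2KB boundaries looking for the
++ * classic 0x55 0xAA option-ROM signature; byte 2 of a ROM header gives
++ * its length in 512-byte units. Under Xen only dom0 runs this, since
++ * only the initial domain sees the machine's real ROM space.
++ */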
 +
-+void set_personality_64bit(void)
++#ifdef CONFIG_PROC_VMCORE
++/* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel. This option will be passed
++ * by kexec loader to the capture kernel.
++ */
++static int __init setup_elfcorehdr(char *arg)
 +{
-+	/* inherit personality from parent */
-+
-+	/* Make sure to be in 64bit mode */
-+	clear_thread_flag(TIF_IA32); 
-+
-+	/* TBD: overwrites user setup. Should have two bits.
-+	   But 64bit processes have always behaved this way,
-+	   so it's not too bad. The main problem is just that
-+   	   32bit childs are affected again. */
-+	current->personality &= ~READ_IMPLIES_EXEC;
++	char *end;
++	if (!arg)
++		return -EINVAL;
++	elfcorehdr_addr = memparse(arg, &end);
++	return end > arg ? 0 : -EINVAL;
 +}
++early_param("elfcorehdr", setup_elfcorehdr);
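++/*
++ * Typical usage, appended by the kexec loader to the capture kernel's
++ * command line:
++ *     elfcorehdr=<address>
++ * memparse() also accepts the usual K/M/G suffixes on the address.
++ */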
++#endif
 +
-+asmlinkage long sys_fork(struct pt_regs *regs)
++#ifndef CONFIG_NUMA
++static void __init
++contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 +{
-+	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
-+}
++	unsigned long bootmap_size, bootmap;
 +
-+asmlinkage long
-+sys_clone(unsigned long clone_flags, unsigned long newsp,
-+	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
-+{
-+	if (!newsp)
-+		newsp = regs->rsp;
-+	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
-+}
++	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
++	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
++	if (bootmap == -1L)
++		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
++	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
++	e820_register_active_regions(0, start_pfn, end_pfn);
++#ifdef CONFIG_XEN
++	free_bootmem_with_active_regions(0, xen_start_info->nr_pages);
++#else
++	free_bootmem_with_active_regions(0, end_pfn);
++#endif
++	reserve_bootmem(bootmap, bootmap_size);
++} 
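++/*
++ * This bootstraps the bootmem allocator: bootmem_bootmap_pages() sizes
++ * a bitmap with one bit per page up to end_pfn, find_e820_area() finds
++ * room for it in usable RAM, and the final reserve_bootmem() keeps the
++ * bitmap itself from being handed out. In the Xen case only the pages
++ * actually granted by the hypervisor (nr_pages) are freed into the pool.
++ */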
++#endif
 +
-+/*
-+ * This is trivial, and on the face of it looks like it
-+ * could equally well be done in user mode.
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ *              from boot_params into a safe place.
 + *
-+ * Not so, for quite unobvious reasons - register pressure.
-+ * In user mode vfork() cannot have a stack frame, and if
-+ * done by calling the "clone()" system call directly, you
-+ * do not have enough call-clobbered registers to hold all
-+ * the information you need.
 + */
-+asmlinkage long sys_vfork(struct pt_regs *regs)
++static inline void copy_edd(void)
 +{
-+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
-+		    NULL, NULL);
++     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++     edd.edd_info_nr = EDD_NR;
 +}
-+
-+unsigned long get_wchan(struct task_struct *p)
++#else
++static inline void copy_edd(void)
 +{
-+	unsigned long stack;
-+	u64 fp,rip;
-+	int count = 0;
-+
-+	if (!p || p == current || p->state==TASK_RUNNING)
-+		return 0; 
-+	stack = (unsigned long)task_stack_page(p);
-+	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
-+		return 0;
-+	fp = *(u64 *)(p->thread.rsp);
-+	do { 
-+		if (fp < (unsigned long)stack ||
-+		    fp > (unsigned long)stack+THREAD_SIZE)
-+			return 0; 
-+		rip = *(u64 *)(fp+8); 
-+		if (!in_sched_functions(rip))
-+			return rip; 
-+		fp = *(u64 *)fp; 
-+	} while (count++ < 16); 
-+	return 0;
 +}
++#endif
 +
-+long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
-+{ 
-+	int ret = 0; 
-+	int doit = task == current;
-+	int cpu;
++#ifndef CONFIG_XEN
++#define EBDA_ADDR_POINTER 0x40E
 +
-+	switch (code) { 
-+	case ARCH_SET_GS:
-+		if (addr >= TASK_SIZE_OF(task))
-+			return -EPERM; 
-+		cpu = get_cpu();
-+		/* handle small bases via the GDT because that's faster to 
-+		   switch. */
-+		if (addr <= 0xffffffff) {  
-+			set_32bit_tls(task, GS_TLS, addr); 
-+			if (doit) { 
-+				load_TLS(&task->thread, cpu);
-+				load_gs_index(GS_TLS_SEL); 
-+			}
-+			task->thread.gsindex = GS_TLS_SEL; 
-+			task->thread.gs = 0;
-+		} else { 
-+			task->thread.gsindex = 0;
-+			task->thread.gs = addr;
-+			if (doit) {
-+				load_gs_index(0);
-+				ret = HYPERVISOR_set_segment_base(
-+					SEGBASE_GS_USER, addr);
-+			} 
-+		}
-+		put_cpu();
-+		break;
-+	case ARCH_SET_FS:
-+		/* Not strictly needed for fs, but do it for symmetry
-+		   with gs */
-+		if (addr >= TASK_SIZE_OF(task))
-+			return -EPERM; 
-+		cpu = get_cpu();
-+		/* handle small bases via the GDT because that's faster to 
-+		   switch. */
-+		if (addr <= 0xffffffff) { 
-+			set_32bit_tls(task, FS_TLS, addr);
-+			if (doit) { 
-+				load_TLS(&task->thread, cpu); 
-+				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
-+			}
-+			task->thread.fsindex = FS_TLS_SEL;
-+			task->thread.fs = 0;
-+		} else { 
-+			task->thread.fsindex = 0;
-+			task->thread.fs = addr;
-+			if (doit) {
-+				/* set the selector to 0 to not confuse
-+				   __switch_to */
-+				asm volatile("movl %0,%%fs" :: "r" (0));
-+                                ret = HYPERVISOR_set_segment_base(SEGBASE_FS,
-+								  addr);
-+			}
-+		}
-+		put_cpu();
-+		break;
-+	case ARCH_GET_FS: { 
-+		unsigned long base; 
-+		if (task->thread.fsindex == FS_TLS_SEL)
-+			base = read_32bit_tls(task, FS_TLS);
-+		else if (doit)
-+			rdmsrl(MSR_FS_BASE, base);
-+		else
-+			base = task->thread.fs;
-+		ret = put_user(base, (unsigned long __user *)addr); 
-+		break; 
-+	}
-+	case ARCH_GET_GS: { 
-+		unsigned long base;
-+		unsigned gsindex;
-+		if (task->thread.gsindex == GS_TLS_SEL)
-+			base = read_32bit_tls(task, GS_TLS);
-+		else if (doit) {
-+ 			asm("movl %%gs,%0" : "=r" (gsindex));
-+			if (gsindex)
-+				rdmsrl(MSR_KERNEL_GS_BASE, base);
-+			else
-+				base = task->thread.gs;
-+		}
-+		else
-+			base = task->thread.gs;
-+		ret = put_user(base, (unsigned long __user *)addr); 
-+		break;
-+	}
++unsigned __initdata ebda_addr;
++unsigned __initdata ebda_size;
 +
-+	default:
-+		ret = -EINVAL;
-+		break;
-+	} 
++static void discover_ebda(void)
++{
++	/*
++	 * there is a real-mode segmented pointer pointing to the 
++	 * 4K EBDA area at 0x40E
++	 */
++	ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
++	ebda_addr <<= 4;
 +
-+	return ret;	
-+} 
++	ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
 +
-+long sys_arch_prctl(int code, unsigned long addr)
-+{
-+	return do_arch_prctl(current, code, addr);
-+} 
++	/* Round EBDA up to pages */
++	if (ebda_size == 0)
++		ebda_size = 1;
++	ebda_size <<= 10;
++	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
++	if (ebda_size > 64*1024)
++		ebda_size = 64*1024;
++}
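++/*
++ * Background: the word at 0x40E in the BIOS data area holds the
++ * real-mode segment of the EBDA, so shifting it left by 4 yields the
++ * linear address. The first byte of the EBDA gives its size in KiB,
++ * which is rounded to pages and clamped to the conventional 64KB limit.
++ */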
++#endif
 +
-+/* 
-+ * Capture the user space registers if the task is not running (in user space)
-+ */
-+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++void __init setup_arch(char **cmdline_p)
 +{
-+	struct pt_regs *pp, ptregs;
++	printk(KERN_INFO "Command line: %s\n", saved_command_line);
 +
-+	pp = task_pt_regs(tsk);
++#ifdef CONFIG_XEN
++	/* Register a call for panic conditions. */
++	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
 +
-+	ptregs = *pp; 
-+	ptregs.cs &= 0xffff;
-+	ptregs.ss &= 0xffff;
++ 	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); 
++ 	screen_info = SCREEN_INFO;
 +
-+	elf_core_copy_regs(regs, &ptregs);
-+ 
-+        boot_option_idle_override = 1;
-+	return 1;
-+}
++	if (is_initial_xendomain()) {
++		/* This is drawn from a dump from vgacon:startup in
++		 * standard Linux. */
++		screen_info.orig_video_mode = 3;
++		screen_info.orig_video_isVGA = 1;
++		screen_info.orig_video_lines = 25;
++		screen_info.orig_video_cols = 80;
++		screen_info.orig_video_ega_bx = 3;
++		screen_info.orig_video_points = 16;
++		screen_info.orig_y = screen_info.orig_video_lines - 1;
++		if (xen_start_info->console.dom0.info_size >=
++		    sizeof(struct dom0_vga_console_info)) {
++			const struct dom0_vga_console_info *info =
++				(struct dom0_vga_console_info *)(
++					(char *)xen_start_info +
++					xen_start_info->console.dom0.info_off);
++			dom0_init_screen_info(info);
++		}
++		xen_start_info->console.domU.mfn = 0;
++		xen_start_info->console.domU.evtchn = 0;
++	} else
++		screen_info.orig_video_isVGA = 0;
++#else
++ 	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
++ 	screen_info = SCREEN_INFO;
++#endif	/* !CONFIG_XEN */
 +
-+unsigned long arch_align_stack(unsigned long sp)
-+{
-+	if (randomize_va_space)
-+		sp -= get_random_int() % 8192;
-+	return sp & ~0xf;
-+}
++	edid_info = EDID_INFO;
++	saved_video_mode = SAVED_VIDEO_MODE;
++	bootloader_type = LOADER_TYPE;
 +
-+#ifndef CONFIG_SMP
-+void _restore_vcpu(void)
-+{
-+}
++#ifdef CONFIG_BLK_DEV_RAM
++	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/setup64-xen.c linux-2.6.18-xen/arch/x86_64/kernel/setup64-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/setup64-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/setup64-xen.c	2006-11-19 14:26:34.000000000 +0100
-@@ -0,0 +1,361 @@
-+/* 
-+ * X86-64 specific CPU setup.
-+ * Copyright (C) 1995  Linus Torvalds
-+ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
-+ * See setup.c for older changelog.
-+ *
-+ * Jun Nakajima <jun.nakajima at intel.com> 
-+ *   Modified for Xen
-+ *
-+ */ 
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/bootmem.h>
-+#include <linux/bitops.h>
-+#include <linux/module.h>
-+#include <asm/bootsetup.h>
-+#include <asm/pda.h>
-+#include <asm/pgtable.h>
-+#include <asm/processor.h>
-+#include <asm/desc.h>
-+#include <asm/atomic.h>
-+#include <asm/mmu_context.h>
-+#include <asm/smp.h>
-+#include <asm/i387.h>
-+#include <asm/percpu.h>
-+#include <asm/proto.h>
-+#include <asm/sections.h>
 +#ifdef CONFIG_XEN
-+#include <asm/hypervisor.h>
++	setup_xen_features();
++
++	HYPERVISOR_vm_assist(VMASST_CMD_enable,
++			     VMASST_TYPE_writable_pagetables);
++
++	ARCH_SETUP
 +#endif
 +
-+char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
++	setup_memory_region();
++	copy_edd();
 +
-+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
++	if (!MOUNT_ROOT_RDONLY)
++		root_mountflags &= ~MS_RDONLY;
++	init_mm.start_code = (unsigned long) &_text;
++	init_mm.end_code = (unsigned long) &_etext;
++	init_mm.end_data = (unsigned long) &_edata;
++	init_mm.brk = (unsigned long) &_end;
 +
-+struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
-+EXPORT_SYMBOL(_cpu_pda);
-+struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
++	code_resource.start = virt_to_phys(&_text);
++	code_resource.end = virt_to_phys(&_etext)-1;
++	data_resource.start = virt_to_phys(&_etext);
++	data_resource.end = virt_to_phys(&_edata)-1;
 +
-+#ifndef CONFIG_X86_NO_IDT
-+struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; 
-+#endif
++	early_identify_cpu(&boot_cpu_data);
 +
-+char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
++	strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
++	*cmdline_p = command_line;
 +
-+unsigned long __supported_pte_mask __read_mostly = ~0UL;
-+EXPORT_SYMBOL(__supported_pte_mask);
-+static int do_not_nx __cpuinitdata = 0;
++	parse_early_param();
 +
-+/* noexec=on|off
-+Control non executable mappings for 64bit processes.
++	finish_e820_parsing();
 +
-+on	Enable(default)
-+off	Disable
-+*/ 
-+int __init nonx_setup(char *str)
-+{
-+	if (!strncmp(str, "on", 2)) {
-+                __supported_pte_mask |= _PAGE_NX; 
-+ 		do_not_nx = 0; 
-+	} else if (!strncmp(str, "off", 3)) {
-+		do_not_nx = 1;
-+		__supported_pte_mask &= ~_PAGE_NX;
-+        }
-+	return 1;
-+} 
-+__setup("noexec=", nonx_setup);	/* parsed early actually */
++	e820_register_active_regions(0, 0, -1UL);
++	/*
++	 * partially used pages are not usable - thus
++	 * we are rounding upwards:
++	 */
++	end_pfn = e820_end_of_ram();
++	num_physpages = end_pfn;
 +
-+int force_personality32 = 0; 
++	check_efer();
 +
-+/* noexec32=on|off
-+Control non executable heap for 32bit processes.
-+To control the stack too use noexec=off
++#ifndef CONFIG_XEN
++	discover_ebda();
++#endif
 +
-+on	PROT_READ does not imply PROT_EXEC for 32bit processes
-+off	PROT_READ implies PROT_EXEC (default)
-+*/
-+static int __init nonx32_setup(char *str)
-+{
-+	if (!strcmp(str, "on"))
-+		force_personality32 &= ~READ_IMPLIES_EXEC;
-+	else if (!strcmp(str, "off"))
-+		force_personality32 |= READ_IMPLIES_EXEC;
-+	return 1;
-+}
-+__setup("noexec32=", nonx32_setup);
++	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
 +
-+/*
-+ * Great future plan:
-+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
-+ * Always point %gs to its beginning
-+ */
-+void __init setup_per_cpu_areas(void)
-+{ 
-+	int i;
-+	unsigned long size;
++	zap_low_mappings(0);
 +
-+#ifdef CONFIG_HOTPLUG_CPU
-+	prefill_possible_map();
++	/* How many end-of-memory variables you have, grandma! */
++	max_low_pfn = end_pfn;
++	max_pfn = end_pfn;
++	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;
++
++	/* Remove active ranges so rediscovery with NUMA-awareness happens */
++	remove_all_active_ranges();
++
++#ifdef CONFIG_ACPI_NUMA
++	/*
++	 * Parse SRAT to discover nodes.
++	 */
++	acpi_numa_init();
 +#endif
 +
-+	/* Copy section for each CPU (we discard the original) */
-+	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
-+#ifdef CONFIG_MODULES
-+	if (size < PERCPU_ENOUGH_ROOM)
-+		size = PERCPU_ENOUGH_ROOM;
++#ifdef CONFIG_NUMA
++	numa_initmem_init(0, end_pfn); 
++#else
++	contig_initmem_init(0, end_pfn);
 +#endif
 +
-+	for_each_cpu_mask (i, cpu_possible_map) {
-+		char *ptr;
++	/* Reserve direct mapping */
++	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
++				(table_end - table_start) << PAGE_SHIFT);
 +
-+		if (!NODE_DATA(cpu_to_node(i))) {
-+			printk("cpu with no node %d, num_online_nodes %d\n",
-+			       i, num_online_nodes());
-+			ptr = alloc_bootmem(size);
-+		} else { 
-+			ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
-+		}
-+		if (!ptr)
-+			panic("Cannot allocate cpu data for CPU %d\n", i);
-+		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
-+		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-+	}
-+} 
++	/* reserve kernel */
++	reserve_bootmem_generic(__pa_symbol(&_text),
++				__pa_symbol(&_end) - __pa_symbol(&_text));
 +
 +#ifdef CONFIG_XEN
-+static void switch_pt(void)
-+{
-+	xen_pt_switch(__pa(init_level4_pgt));
-+        xen_new_user_pt(__pa(init_level4_user_pgt));
-+}
-+
-+void __cpuinit cpu_gdt_init(struct desc_ptr *gdt_descr)
-+{
-+	unsigned long frames[16];
-+	unsigned long va;
-+	int f;
-+
-+	for (va = gdt_descr->address, f = 0;
-+	     va < gdt_descr->address + gdt_descr->size;
-+	     va += PAGE_SIZE, f++) {
-+		frames[f] = virt_to_mfn(va);
-+		make_page_readonly(
-+			(void *)va, XENFEAT_writable_descriptor_tables);
-+	}
-+	if (HYPERVISOR_set_gdt(frames, gdt_descr->size /
-+                               sizeof (struct desc_struct)))
-+		BUG();
-+}
++	/* reserve physmap, start info and initial page tables */
++	reserve_bootmem(__pa_symbol(&_end), (table_start<<PAGE_SHIFT)-__pa_symbol(&_end));
 +#else
-+static void switch_pt(void)
-+{
-+	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
-+}
++	/*
++	 * reserve physical page 0 - it's a special BIOS page on many boxes,
++	 * enabling clean reboots, SMP operation, laptop functions.
++	 */
++	reserve_bootmem_generic(0, PAGE_SIZE);
 +
-+void __init cpu_gdt_init(struct desc_ptr *gdt_descr)
-+{
-+	asm volatile("lgdt %0" :: "m" (*gdt_descr));
-+	asm volatile("lidt %0" :: "m" (idt_descr));
-+}
++	/* reserve ebda region */
++	if (ebda_addr)
++		reserve_bootmem_generic(ebda_addr, ebda_size);
 +#endif
 +
-+void pda_init(int cpu)
-+{ 
-+	struct x8664_pda *pda = cpu_pda(cpu);
++#ifdef CONFIG_SMP
++	/*
++	 * But first pinch a few for the stack/trampoline stuff
++	 * FIXME: Don't need the extra page at 4K, but need to fix
++	 * trampoline before removing it. (see the GDT stuff)
++	 */
++	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
 +
-+	/* Setup up data that may be needed in __get_free_pages early */
-+	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0)); 
-+#ifndef CONFIG_XEN
-+	wrmsrl(MSR_GS_BASE, pda);
-+#else
-+	HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (unsigned long)pda);
++	/* Reserve SMP trampoline */
++	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
 +#endif
-+	pda->cpunumber = cpu; 
-+	pda->irqcount = -1;
-+	pda->kernelstack = 
-+		(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE; 
-+	pda->active_mm = &init_mm;
-+	pda->mmu_state = 0;
 +
-+	if (cpu == 0) {
++#ifdef CONFIG_ACPI_SLEEP
++       /*
++        * Reserve low memory region for sleep support.
++        */
++       acpi_reserve_bootmem();
++#endif
 +#ifdef CONFIG_XEN
-+		xen_init_pt();
++#ifdef CONFIG_BLK_DEV_INITRD
++	if (xen_start_info->mod_start) {
++		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++			/*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
++			initrd_start = INITRD_START + PAGE_OFFSET;
++			initrd_end = initrd_start+INITRD_SIZE;
++			initrd_below_start_ok = 1;
++		} else {
++			printk(KERN_ERR "initrd extends beyond end of memory "
++				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++				(unsigned long)(INITRD_START + INITRD_SIZE),
++				(unsigned long)(end_pfn << PAGE_SHIFT));
++			initrd_start = 0;
++		}
++	}
 +#endif
-+		/* others are initialized in smpboot.c */
-+		pda->pcurrent = &init_task;
-+		pda->irqstackptr = boot_cpu_stack; 
-+	} else {
-+		pda->irqstackptr = (char *)
-+			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-+		if (!pda->irqstackptr)
-+			panic("cannot allocate irqstack for cpu %d", cpu); 
++#else	/* CONFIG_XEN */
++#ifdef CONFIG_BLK_DEV_INITRD
++	if (LOADER_TYPE && INITRD_START) {
++		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
++			initrd_start = INITRD_START + PAGE_OFFSET;
++			initrd_end = initrd_start+INITRD_SIZE;
++		}
++		else {
++			printk(KERN_ERR "initrd extends beyond end of memory "
++			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++			    (unsigned long)(INITRD_START + INITRD_SIZE),
++			    (unsigned long)(end_pfn << PAGE_SHIFT));
++			initrd_start = 0;
++		}
++	}
++#endif
++#endif	/* !CONFIG_XEN */
++#ifdef CONFIG_KEXEC
++	if (crashk_res.start != crashk_res.end) {
++		reserve_bootmem_generic(crashk_res.start,
++			crashk_res.end - crashk_res.start + 1);
 +	}
++#endif
 +
-+	switch_pt();
++	paging_init();
++	/*
++	 * Find and reserve possible boot-time SMP configuration:
++	 */
++	find_smp_config();
++#ifdef CONFIG_XEN
++	{
++		int i, j, k, fpp;
 +
-+	pda->irqstackptr += IRQSTACKSIZE-64;
-+} 
++		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++			/* Make sure we have a large enough P->M table. */
++			phys_to_machine_mapping = alloc_bootmem_pages(
++				end_pfn * sizeof(unsigned long));
++			memset(phys_to_machine_mapping, ~0,
++			       end_pfn * sizeof(unsigned long));
++			memcpy(phys_to_machine_mapping,
++			       (unsigned long *)xen_start_info->mfn_list,
++			       xen_start_info->nr_pages * sizeof(unsigned long));
++			free_bootmem(
++				__pa(xen_start_info->mfn_list),
++				PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++						sizeof(unsigned long))));
 +
-+#ifndef CONFIG_X86_NO_TSS
-+char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
-+__attribute__((section(".bss.page_aligned")));
++			/*
++			 * Initialise the list of the frames that specify the
++			 * list of frames that make up the p2m table. Used by
++			 * save/restore.
++			 */
++			pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);
++			HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++				virt_to_mfn(pfn_to_mfn_frame_list_list);
++
++			fpp = PAGE_SIZE/sizeof(unsigned long);
++			for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
++				if ((j % fpp) == 0) {
++					k++;
++					BUG_ON(k>=fpp);
++					pfn_to_mfn_frame_list[k] =
++						alloc_bootmem_pages(PAGE_SIZE);
++					pfn_to_mfn_frame_list_list[k] =
++						virt_to_mfn(pfn_to_mfn_frame_list[k]);
++					j=0;
++				}
++				pfn_to_mfn_frame_list[k][j] =
++					virt_to_mfn(&phys_to_machine_mapping[i]);
++			}
++			HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
++		}
++
++	}
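++	/*
++	 * The structures built above are the guest's pfn -> mfn
++	 * (pseudo-physical to machine) translation: phys_to_machine_mapping
++	 * is the table itself, and pfn_to_mfn_frame_list[_list] publishes
++	 * the machine frames holding it through the shared info page so
++	 * the tools can walk it during save/restore and migration.
++	 */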
++
++	if (is_initial_xendomain())
++		dmi_scan_machine();
++
++#ifdef CONFIG_ACPI
++	if (!is_initial_xendomain()) {
++		acpi_disabled = 1;
++		acpi_ht = 0;
++	}
++#endif
 +#endif
 +
-+/* May not be marked __init: used by software suspend */
-+void syscall_init(void)
-+{
 +#ifndef CONFIG_XEN
-+	/* 
-+	 * LSTAR and STAR live in a bit strange symbiosis.
-+	 * They both write to the same internal register. STAR allows to set CS/DS
-+	 * but only a 32bit target. LSTAR sets the 64bit rip. 	 
-+	 */ 
-+	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32); 
-+	wrmsrl(MSR_LSTAR, system_call); 
++#ifdef CONFIG_PCI
++	early_quirks();
++#endif
++#endif
 +
-+	/* Flags to clear on syscall */
-+	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000); 
++	/*
++	 * set this early, so we don't allocate cpu0
++	 * if the MADT list doesn't list the BSP first
++	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
++	 */
++	cpu_set(0, cpu_present_map);
++#ifdef CONFIG_ACPI
++	/*
++	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
++	 * Call this early for SRAT node setup.
++	 */
++	acpi_boot_table_init();
++
++	/*
++	 * Read APIC and some other early information from ACPI tables.
++	 */
++	acpi_boot_init();
 +#endif
-+#ifdef CONFIG_IA32_EMULATION   		
-+	syscall32_cpu_init ();
++
++	init_cpu_to_node();
++
++	/*
++	 * get boot-time SMP configuration:
++	 */
++	if (smp_found_config)
++		get_smp_config();
++#ifndef CONFIG_XEN
++	init_apic_mappings();
++#endif
++#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
++	prefill_possible_map();
 +#endif
-+}
 +
-+void __cpuinit check_efer(void)
-+{
-+	unsigned long efer;
++	/*
++	 * Request address space for all standard RAM and ROM resources
++	 * and also for regions reported as reserved by the e820.
++	 */
++	probe_roms();
++#ifdef CONFIG_XEN
++	if (is_initial_xendomain()) {
++		struct xen_memory_map memmap;
 +
-+	rdmsrl(MSR_EFER, efer); 
-+        if (!(efer & EFER_NX) || do_not_nx) { 
-+                __supported_pte_mask &= ~_PAGE_NX; 
-+        }       
-+}
++		memmap.nr_entries = E820MAX;
++		set_xen_guest_handle(memmap.buffer, machine_e820.map);
 +
-+/*
-+ * cpu_init() initializes state that is per-CPU. Some data is already
-+ * initialized (naturally) in the bootstrap process, such as the GDT
-+ * and IDT. We reload them nevertheless, this function acts as a
-+ * 'CPU state barrier', nothing should get across.
-+ * A lot of state is already set up in PDA init.
-+ */
-+void __cpuinit cpu_init (void)
-+{
-+	int cpu = stack_smp_processor_id();
-+#ifndef CONFIG_X86_NO_TSS
-+	struct tss_struct *t = &per_cpu(init_tss, cpu);
-+	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-+	unsigned long v; 
-+	char *estacks = NULL; 
++		if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++			BUG();
++		machine_e820.nr_map = memmap.nr_entries;
++
++		e820_reserve_resources(machine_e820.map, machine_e820.nr_map);
++	}
++#else
++	e820_reserve_resources(e820.map, e820.nr_map);
++#endif
++	e820_mark_nosave_regions();
++
++	request_resource(&iomem_resource, &video_ram_resource);
++
++	{
 +	unsigned i;
++	/* request I/O space for devices used on all i[345]86 PCs */
++	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
++		request_resource(&ioport_resource, &standard_io_resources[i]);
++	}
++
++#ifdef CONFIG_XEN
++	if (is_initial_xendomain())
++		e820_setup_gap(machine_e820.map, machine_e820.nr_map);
++#else
++	e820_setup_gap(e820.map, e820.nr_map);
++#endif
++
++#ifdef CONFIG_XEN
++	{
++		struct physdev_set_iopl set_iopl;
++
++		set_iopl.iopl = 1;
++		HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++
++		if (is_initial_xendomain()) {
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++			conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++			conswitchp = &dummy_con;
 +#endif
-+	struct task_struct *me;
-+
-+	/* CPU 0 is initialised in head64.c */
-+	if (cpu != 0) {
-+		pda_init(cpu);
-+		zap_low_mappings(cpu);
-+	}
-+#ifndef CONFIG_X86_NO_TSS
-+	else
-+		estacks = boot_exception_stacks; 
 +#endif
++		} else {
++#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
++			conswitchp = &dummy_con;
++#endif
++		}
++	}
++#else	/* CONFIG_XEN */
 +
-+	me = current;
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++	conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++	conswitchp = &dummy_con;
++#endif
++#endif
 +
-+	if (cpu_test_and_set(cpu, cpu_initialized))
-+		panic("CPU#%d already initialized!\n", cpu);
++#endif /* !CONFIG_XEN */
++}
 +
-+	printk("Initializing CPU#%d\n", cpu);
++#ifdef CONFIG_XEN
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++	HYPERVISOR_shutdown(SHUTDOWN_crash);
++	/* we're never actually going to get here... */
++	return NOTIFY_DONE;
++}
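++/*
++ * Registered on panic_notifier_list in setup_arch(): rather than spin,
++ * a panicking domain reports SHUTDOWN_crash to the hypervisor, letting
++ * the toolstack log the crash or take a dump of the guest.
++ */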
++#endif /* CONFIG_XEN */
 +
-+	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 +
-+	/*
-+	 * Initialize the per-CPU GDT with the boot GDT,
-+	 * and set up the GDT descriptor:
-+	 */
-+#ifndef CONFIG_XEN 
-+	if (cpu)
-+ 		memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
-+#endif
++static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
++{
++	unsigned int *v;
 +
-+	cpu_gdt_descr[cpu].size = GDT_SIZE;
-+	cpu_gdt_init(&cpu_gdt_descr[cpu]);
++	if (c->extended_cpuid_level < 0x80000004)
++		return 0;
 +
-+	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
-+	syscall_init();
++	v = (unsigned int *) c->x86_model_id;
++	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
++	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
++	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
++	c->x86_model_id[48] = 0;
++	return 1;
++}
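++/*
++ * CPUID leaves 0x80000002-0x80000004 each return 16 bytes of the
++ * processor brand string in EAX:EBX:ECX:EDX, 48 bytes in all, which is
++ * why exactly twelve 32-bit slots are filled before the string is
++ * NUL-terminated at byte 48.
++ */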
 +
-+	wrmsrl(MSR_FS_BASE, 0);
-+	wrmsrl(MSR_KERNEL_GS_BASE, 0);
-+	barrier(); 
 +
-+	check_efer();
++static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
++{
++	unsigned int n, dummy, eax, ebx, ecx, edx;
 +
-+#ifndef CONFIG_X86_NO_TSS
-+	/*
-+	 * set up and load the per-CPU TSS
-+	 */
-+	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-+		if (cpu) {
-+			static const unsigned int order[N_EXCEPTION_STACKS] = {
-+				[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-+				[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
-+			};
++	n = c->extended_cpuid_level;
 +
-+			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-+			if (!estacks)
-+				panic("Cannot allocate exception stack %ld %d\n",
-+				      v, cpu); 
-+		}
-+		switch (v + 1) {
-+#if DEBUG_STKSZ > EXCEPTION_STKSZ
-+		case DEBUG_STACK:
-+			cpu_pda(cpu)->debugstack = (unsigned long)estacks;
-+			estacks += DEBUG_STKSZ;
-+			break;
-+#endif
-+		default:
-+			estacks += EXCEPTION_STKSZ;
-+			break;
-+		}
-+		orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
++	if (n >= 0x80000005) {
++		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
++		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
++			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++		c->x86_cache_size=(ecx>>24)+(edx>>24);
++		/* On K8 L1 TLB is inclusive, so don't count it */
++		c->x86_tlbsize = 0;
 +	}
 +
-+	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
-+	/*
-+	 * <= is required because the CPU will access up to
-+	 * 8 bits beyond the end of the IO permission bitmap.
-+	 */
-+	for (i = 0; i <= IO_BITMAP_LONGS; i++)
-+		t->io_bitmap[i] = ~0UL;
-+#endif
-+
-+	atomic_inc(&init_mm.mm_count);
-+	me->active_mm = &init_mm;
-+	if (me->mm)
-+		BUG();
-+	enter_lazy_tlb(&init_mm, me);
-+
-+#ifndef CONFIG_X86_NO_TSS
-+	set_tss_desc(cpu, t);
-+#endif
-+#ifndef CONFIG_XEN
-+	load_TR_desc();
-+#endif
-+	load_LDT(&init_mm.context);
++	if (n >= 0x80000006) {
++		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
++		ecx = cpuid_ecx(0x80000006);
++		c->x86_cache_size = ecx >> 16;
++		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
 +
-+	/*
-+	 * Clear all 6 debug registers:
-+	 */
++		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
++		c->x86_cache_size, ecx & 0xFF);
++	}
 +
-+	set_debugreg(0UL, 0);
-+	set_debugreg(0UL, 1);
-+	set_debugreg(0UL, 2);
-+	set_debugreg(0UL, 3);
-+	set_debugreg(0UL, 6);
-+	set_debugreg(0UL, 7);
++	if (n >= 0x80000007)
++		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); 
++	if (n >= 0x80000008) {
++		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); 
++		c->x86_virt_bits = (eax >> 8) & 0xff;
++		c->x86_phys_bits = eax & 0xff;
++	}
++}
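++/*
++ * The decoding follows AMD's extended CPUID layout: leaf 0x80000005
++ * reports the L1 I/D cache sizes in the top bytes of EDX/ECX, and leaf
++ * 0x80000006 packs the L2 size in KB into ECX[31:16] with the line
++ * size in ECX[7:0].
++ */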
 +
-+	fpu_init(); 
++#ifdef CONFIG_NUMA
++static int nearby_node(int apicid)
++{
++	int i;
++	for (i = apicid - 1; i >= 0; i--) {
++		int node = apicid_to_node[i];
++		if (node != NUMA_NO_NODE && node_online(node))
++			return node;
++	}
++	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
++		int node = apicid_to_node[i];
++		if (node != NUMA_NO_NODE && node_online(node))
++			return node;
++	}
++	return first_node(node_online_map); /* Shouldn't happen */
 +}
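++/*
++ * APIC ids are handed out densely enough that scanning outwards from a
++ * CPU's own apicid usually hits a neighbour whose node is online, so
++ * the first_node() fallback really is a last resort.
++ */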
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/setup-xen.c linux-2.6.18-xen/arch/x86_64/kernel/setup-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/setup-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/setup-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,1637 @@
-+/*
-+ *  linux/arch/x86-64/kernel/setup.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *
-+ *  Nov 2001 Dave Jones <davej at suse.de>
-+ *  Forked from i386 setup code.
-+ */
++#endif
 +
 +/*
-+ * This file handles the architecture-dependent parts of initialization
++ * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
++ * Assumes number of cores is a power of two.
 + */
++static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++	unsigned bits;
++#ifdef CONFIG_NUMA
++	int cpu = smp_processor_id();
++	int node = 0;
++	unsigned apicid = hard_smp_processor_id();
++#endif
++	unsigned ecx = cpuid_ecx(0x80000008);
 +
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/stddef.h>
-+#include <linux/unistd.h>
-+#include <linux/ptrace.h>
-+#include <linux/slab.h>
-+#include <linux/user.h>
-+#include <linux/a.out.h>
-+#include <linux/screen_info.h>
-+#include <linux/ioport.h>
-+#include <linux/delay.h>
-+#include <linux/init.h>
-+#include <linux/initrd.h>
-+#include <linux/highmem.h>
-+#include <linux/bootmem.h>
-+#include <linux/module.h>
-+#include <asm/processor.h>
-+#include <linux/console.h>
-+#include <linux/seq_file.h>
-+#include <linux/crash_dump.h>
-+#include <linux/root_dev.h>
-+#include <linux/pci.h>
-+#include <linux/acpi.h>
-+#include <linux/kallsyms.h>
-+#include <linux/edd.h>
-+#include <linux/mmzone.h>
-+#include <linux/kexec.h>
-+#include <linux/cpufreq.h>
-+#include <linux/dmi.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/ctype.h>
-+
-+#include <asm/mtrr.h>
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/msr.h>
-+#include <asm/desc.h>
-+#include <video/edid.h>
-+#include <asm/e820.h>
-+#include <asm/dma.h>
-+#include <asm/mpspec.h>
-+#include <asm/mmu_context.h>
-+#include <asm/bootsetup.h>
-+#include <asm/proto.h>
-+#include <asm/setup.h>
-+#include <asm/mach_apic.h>
-+#include <asm/numa.h>
-+#include <asm/sections.h>
-+#include <asm/dmi.h>
-+#ifdef CONFIG_XEN
-+#include <linux/percpu.h>
-+#include <linux/pfn.h>
-+#include <xen/interface/physdev.h>
-+#include "setup_arch_pre.h"
-+#include <asm/hypervisor.h>
-+#include <xen/interface/nmi.h>
-+#include <xen/features.h>
-+#include <xen/xencons.h>
-+#include <asm/mach-xen/setup_arch_post.h>
-+#include <xen/interface/memory.h>
-+
-+extern unsigned long start_pfn;
-+extern struct edid_info edid_info;
-+EXPORT_SYMBOL_GPL(edid_info);
++	c->x86_max_cores = (ecx & 0xff) + 1;
 +
-+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+EXPORT_SYMBOL(HYPERVISOR_shared_info);
++	/* CPU telling us the core id bits shift? */
++	bits = (ecx >> 12) & 0xF;
 +
-+extern char hypercall_page[PAGE_SIZE];
-+EXPORT_SYMBOL(hypercall_page);
++	/* Otherwise recompute */
++	if (bits == 0) {
++		while ((1 << bits) < c->x86_max_cores)
++			bits++;
++	}
 +
-+/* Allows setting of maximum possible memory size  */
-+unsigned long xen_override_max_pfn;
++	/* Low order bits define the core id (index of core in socket) */
++	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
++	/* Convert the APIC ID into the socket ID */
++	c->phys_proc_id = phys_pkg_id(bits);
 +
-+static int xen_panic_event(struct notifier_block *, unsigned long, void *);
-+static struct notifier_block xen_panic_block = {
-+	xen_panic_event, NULL, 0 /* try to go last */
-+};
++#ifdef CONFIG_NUMA
++  	node = c->phys_proc_id;
++ 	if (apicid_to_node[apicid] != NUMA_NO_NODE)
++ 		node = apicid_to_node[apicid];
++ 	if (!node_online(node)) {
++ 		/* Two possibilities here:
++ 		   - The CPU is missing memory and no node was created.
++ 		   In that case try picking one from a nearby CPU
++ 		   - The APIC IDs differ from the HyperTransport node IDs
++ 		   which the K8 northbridge parsing fills in.
++ 		   Assume they are all increased by a constant offset,
++ 		   but in the same order as the HT nodeids.
++ 		   If that doesn't result in a usable node fall back to the
++ 		   path for the previous case.  */
++ 		int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
++ 		if (ht_nodeid >= 0 &&
++ 		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
++ 			node = apicid_to_node[ht_nodeid];
++ 		/* Pick a nearby node */
++ 		if (!node_online(node))
++ 			node = nearby_node(apicid);
++ 	}
++	numa_set_node(cpu, node);
 +
-+unsigned long *phys_to_machine_mapping;
-+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
++	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++#endif
++#endif
++}
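The APIC-id arithmetic in amd_detect_cmp() splits the id into a core number (the low `bits` bits) and a physical package id (the rest). A worked sketch with made-up values, for illustration only:

/* cc -o cmp cmp.c */
#include <stdio.h>

int main(void)
{
	unsigned max_cores = 2;  /* CPUID 0x80000008 ECX[7:0] + 1, assumed */
	unsigned apicid = 5;     /* hypothetical hard_smp_processor_id() */
	unsigned bits = 0;

	/* same recomputation as above when the CPU reports no shift */
	while ((1u << bits) < max_cores)
		bits++;

	/* low bits select the core, the remainder the physical package */
	printf("core id %u, package id %u\n",
	       apicid & ((1u << bits) - 1), apicid >> bits);
	return 0;                /* prints: core id 1, package id 2 */
}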
 +
-+EXPORT_SYMBOL(phys_to_machine_mapping);
++static void __cpuinit init_amd(struct cpuinfo_x86 *c)
++{
++	unsigned level;
 +
-+DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
-+DEFINE_PER_CPU(int, nr_multicall_ents);
++#ifdef CONFIG_SMP
++	unsigned long value;
 +
-+/* Raw start-of-day parameters from the hypervisor. */
-+start_info_t *xen_start_info;
-+EXPORT_SYMBOL(xen_start_info);
++	/*
++	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
++	 * bit 6 of msr C001_0015
++ 	 *
++	 * Errata 63 for SH-B3 steppings
++	 * Errata 122 for all steppings (F+ have it disabled by default)
++	 */
++	if (c->x86 == 15) {
++		rdmsrl(MSR_K8_HWCR, value);
++		value |= 1 << 6;
++		wrmsrl(MSR_K8_HWCR, value);
++	}
 +#endif
 +
-+/*
-+ * Machine setup..
-+ */
++	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
++	   3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
++	clear_bit(0*32+31, &c->x86_capability);
++	
++	/* On C+ stepping K8 rep microcode works well for copy/memset */
++	level = cpuid_eax(1);
++	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
++		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
 +
-+struct cpuinfo_x86 boot_cpu_data __read_mostly;
-+EXPORT_SYMBOL(boot_cpu_data);
++	/* Enable workaround for FXSAVE leak */
++	if (c->x86 >= 6)
++		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
 +
-+unsigned long mmu_cr4_features;
++	level = get_model_name(c);
++	if (!level) {
++		switch (c->x86) { 
++		case 15:
++			/* Should distinguish models here, but this is only
++			   a fallback anyway. */
++			strcpy(c->x86_model_id, "Hammer");
++			break; 
++		} 
++	} 
++	display_cacheinfo(c);
 +
-+int acpi_disabled;
-+EXPORT_SYMBOL(acpi_disabled);
-+#ifdef	CONFIG_ACPI
-+extern int __initdata acpi_ht;
-+extern acpi_interrupt_flags	acpi_sci_flags;
-+int __initdata acpi_force = 0;
-+#endif
++	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
++	if (c->x86_power & (1<<8))
++		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
 +
-+int acpi_numa __initdata;
++	/* Multi core CPU? */
++	if (c->extended_cpuid_level >= 0x80000008)
++		amd_detect_cmp(c);
 +
-+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
-+int bootloader_type;
++	/* Fix cpuid4 emulation for more */
++	num_cache_leaves = 3;
 +
-+unsigned long saved_video_mode;
++	/* RDTSC can be speculated around */
++	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++}
 +
-+/* 
-+ * Early DMI memory
-+ */
-+int dmi_alloc_index;
-+char dmi_alloc_data[DMI_MAX_DATA];
++static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++	u32 	eax, ebx, ecx, edx;
++	int 	index_msb, core_bits;
 +
-+/*
-+ * Setup options
-+ */
-+struct screen_info screen_info;
-+EXPORT_SYMBOL(screen_info);
-+struct sys_desc_table_struct {
-+	unsigned short length;
-+	unsigned char table[0];
-+};
++	cpuid(1, &eax, &ebx, &ecx, &edx);
 +
-+struct edid_info edid_info;
-+struct e820map e820;
-+#ifdef CONFIG_XEN
-+struct e820map machine_e820;
-+#endif
 +
-+extern int root_mountflags;
++	if (!cpu_has(c, X86_FEATURE_HT))
++		return;
++ 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
++		goto out;
 +
-+char command_line[COMMAND_LINE_SIZE];
++	smp_num_siblings = (ebx & 0xff0000) >> 16;
 +
-+struct resource standard_io_resources[] = {
-+	{ .name = "dma1", .start = 0x00, .end = 0x1f,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "pic1", .start = 0x20, .end = 0x21,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "timer0", .start = 0x40, .end = 0x43,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "timer1", .start = 0x50, .end = 0x53,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "fpu", .start = 0xf0, .end = 0xff,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
-+};
++	if (smp_num_siblings == 1) {
++		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
++	} else if (smp_num_siblings > 1) {
 +
-+#define STANDARD_IO_RESOURCES \
-+	(sizeof standard_io_resources / sizeof standard_io_resources[0])
++		if (smp_num_siblings > NR_CPUS) {
++			printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
++			smp_num_siblings = 1;
++			return;
++		}
 +
-+#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
++		index_msb = get_count_order(smp_num_siblings);
++		c->phys_proc_id = phys_pkg_id(index_msb);
 +
-+struct resource data_resource = {
-+	.name = "Kernel data",
-+	.start = 0,
-+	.end = 0,
-+	.flags = IORESOURCE_RAM,
-+};
-+struct resource code_resource = {
-+	.name = "Kernel code",
-+	.start = 0,
-+	.end = 0,
-+	.flags = IORESOURCE_RAM,
-+};
++		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 +
-+#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
++		index_msb = get_count_order(smp_num_siblings);
 +
-+static struct resource system_rom_resource = {
-+	.name = "System ROM",
-+	.start = 0xf0000,
-+	.end = 0xfffff,
-+	.flags = IORESOURCE_ROM,
-+};
++		core_bits = get_count_order(c->x86_max_cores);
 +
-+static struct resource extension_rom_resource = {
-+	.name = "Extension ROM",
-+	.start = 0xe0000,
-+	.end = 0xeffff,
-+	.flags = IORESOURCE_ROM,
-+};
++		c->cpu_core_id = phys_pkg_id(index_msb) &
++					       ((1 << core_bits) - 1);
++	}
++out:
++	if ((c->x86_max_cores * smp_num_siblings) > 1) {
++		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
++		printk(KERN_INFO  "CPU: Processor Core ID: %d\n", c->cpu_core_id);
++	}
 +
-+static struct resource adapter_rom_resources[] = {
-+	{ .name = "Adapter ROM", .start = 0xc8000, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM }
-+};
++#endif
++}
 +
-+#define ADAPTER_ROM_RESOURCES \
-+	(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++/*
++ * find out the number of processor cores on the die
++ */
++static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
++{
++	unsigned int eax, t;
 +
-+static struct resource video_rom_resource = {
-+	.name = "Video ROM",
-+	.start = 0xc0000,
-+	.end = 0xc7fff,
-+	.flags = IORESOURCE_ROM,
-+};
++	if (c->cpuid_level < 4)
++		return 1;
 +
-+static struct resource video_ram_resource = {
-+	.name = "Video RAM area",
-+	.start = 0xa0000,
-+	.end = 0xbffff,
-+	.flags = IORESOURCE_RAM,
-+};
++	cpuid_count(4, 0, &eax, &t, &t, &t);
 +
-+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++	if (eax & 0x1f)
++		return ((eax >> 26) + 1);
++	else
++		return 1;
++}
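intel_num_cpu_cores() uses the subleaf form of CPUID (leaf 4, with ECX selecting the cache level); EAX[31:26]+1 is the core count and EAX[4:0] is the cache type, zero meaning the subleaf is empty. The same query from user space with GCC's __cpuid_count() macro, a sketch with Intel-only semantics:

/* cc -o cores cores.c */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_max(0, NULL) < 4)
		return 1;                /* no deterministic cache leaf */

	__cpuid_count(4, 0, eax, ebx, ecx, edx);
	if (eax & 0x1f)                  /* cache type != null */
		printf("cores per package: %u\n", (eax >> 26) + 1);
	return 0;
}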
 +
-+static int __init romchecksum(unsigned char *rom, unsigned long length)
++static void srat_detect_node(void)
 +{
-+	unsigned char *p, sum = 0;
++#ifdef CONFIG_NUMA
++	unsigned node;
++	int cpu = smp_processor_id();
++	int apicid = hard_smp_processor_id();
++
++	/* Don't do the funky fallback heuristics the AMD version employs
++	   for now. */
++	node = apicid_to_node[apicid];
++	if (node == NUMA_NO_NODE)
++		node = first_node(node_online_map);
++	numa_set_node(cpu, node);
 +
-+	for (p = rom; p < rom + length; p++)
-+		sum += *p;
-+	return sum == 0;
++	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++#endif
 +}
 +
-+static void __init probe_roms(void)
++static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 +{
-+	unsigned long start, length, upper;
-+	unsigned char *rom;
-+	int	      i;
-+
-+#ifdef CONFIG_XEN
-+	/* Nothing to do if not running in dom0. */
-+	if (!is_initial_xendomain())
-+		return;
-+#endif
++	/* Cache sizes */
++	unsigned n;
 +
-+	/* video rom */
-+	upper = adapter_rom_resources[0].start;
-+	for (start = video_rom_resource.start; start < upper; start += 2048) {
-+		rom = isa_bus_to_virt(start);
-+		if (!romsignature(rom))
-+			continue;
++	init_intel_cacheinfo(c);
++	if (c->cpuid_level > 9) {
++		unsigned eax = cpuid_eax(10);
++		/* Check for version and the number of counters */
++		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
++			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
++	}
 +
-+		video_rom_resource.start = start;
++	if (cpu_has_ds) {
++		unsigned int l1, l2;
++		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
++		if (!(l1 & (1<<11)))
++			set_bit(X86_FEATURE_BTS, c->x86_capability);
++		if (!(l1 & (1<<12)))
++			set_bit(X86_FEATURE_PEBS, c->x86_capability);
++	}
 +
-+		/* 0 < length <= 0x7f * 512, historically */
-+		length = rom[2] * 512;
++	n = c->extended_cpuid_level;
++	if (n >= 0x80000008) {
++		unsigned eax = cpuid_eax(0x80000008);
++		c->x86_virt_bits = (eax >> 8) & 0xff;
++		c->x86_phys_bits = eax & 0xff;
++		/* CPUID workaround for Intel 0F34 CPU */
++		if (c->x86_vendor == X86_VENDOR_INTEL &&
++		    c->x86 == 0xF && c->x86_model == 0x3 &&
++		    c->x86_mask == 0x4)
++			c->x86_phys_bits = 36;
++	}
 +
-+		/* if checksum okay, trust length byte */
-+		if (length && romchecksum(rom, length))
-+			video_rom_resource.end = start + length - 1;
++	if (c->x86 == 15)
++		c->x86_cache_alignment = c->x86_clflush_size * 2;
++	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
++	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
++		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++	if (c->x86 == 6)
++		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
++	if (c->x86 == 15)
++		set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++	else
++		clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++ 	c->x86_max_cores = intel_num_cpu_cores(c);
 +
-+		request_resource(&iomem_resource, &video_rom_resource);
-+		break;
-+			}
++	srat_detect_node();
++}
 +
-+	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
-+	if (start < upper)
-+		start = upper;
++static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
++{
++	char *v = c->x86_vendor_id;
 +
-+	/* system rom */
-+	request_resource(&iomem_resource, &system_rom_resource);
-+	upper = system_rom_resource.start;
++	if (!strcmp(v, "AuthenticAMD"))
++		c->x86_vendor = X86_VENDOR_AMD;
++	else if (!strcmp(v, "GenuineIntel"))
++		c->x86_vendor = X86_VENDOR_INTEL;
++	else
++		c->x86_vendor = X86_VENDOR_UNKNOWN;
++}
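The 12-byte vendor string compared above is assembled from CPUID leaf 0 in EBX, EDX, ECX order (note the swapped last two registers, visible in the cpuid() call in early_identify_cpu() below). A user-space sketch of the same assembly, illustrative only:

/* cc -o vendor vendor.c */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	__get_cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);     /* "Genu" / "Auth" */
	memcpy(vendor + 4, &edx, 4);     /* "ineI" / "enti" */
	memcpy(vendor + 8, &ecx, 4);     /* "ntel" / "cAMD" */
	vendor[12] = '\0';

	printf("vendor: %s, max basic leaf: %u\n", vendor, eax);
	return 0;
}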
 +
-+	/* check for extension rom (ignore length byte!) */
-+	rom = isa_bus_to_virt(extension_rom_resource.start);
-+	if (romsignature(rom)) {
-+		length = extension_rom_resource.end - extension_rom_resource.start + 1;
-+		if (romchecksum(rom, length)) {
-+			request_resource(&iomem_resource, &extension_rom_resource);
-+			upper = extension_rom_resource.start;
-+		}
-+	}
++struct cpu_model_info {
++	int vendor;
++	int family;
++	char *model_names[16];
++};
 +
-+	/* check for adapter roms on 2k boundaries */
-+	for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
-+		rom = isa_bus_to_virt(start);
-+		if (!romsignature(rom))
-+			continue;
++/* Do some early cpuid on the boot CPU to get the parameters that are
++   needed before check_bugs. Everything advanced is in identify_cpu
++   below. */
++void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
++{
++	u32 tfms;
 +
-+		/* 0 < length <= 0x7f * 512, historically */
-+		length = rom[2] * 512;
++	c->loops_per_jiffy = loops_per_jiffy;
++	c->x86_cache_size = -1;
++	c->x86_vendor = X86_VENDOR_UNKNOWN;
++	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
++	c->x86_vendor_id[0] = '\0'; /* Unset */
++	c->x86_model_id[0] = '\0';  /* Unset */
++	c->x86_clflush_size = 64;
++	c->x86_cache_alignment = c->x86_clflush_size;
++	c->x86_max_cores = 1;
++	c->extended_cpuid_level = 0;
++	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 +
-+		/* but accept any length that fits if checksum okay */
-+		if (!length || start + length > upper || !romchecksum(rom, length))
-+			continue;
++	/* Get vendor name */
++	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
++	      (unsigned int *)&c->x86_vendor_id[0],
++	      (unsigned int *)&c->x86_vendor_id[8],
++	      (unsigned int *)&c->x86_vendor_id[4]);
++		
++	get_cpu_vendor(c);
 +
-+		adapter_rom_resources[i].start = start;
-+		adapter_rom_resources[i].end = start + length - 1;
-+		request_resource(&iomem_resource, &adapter_rom_resources[i]);
++	/* Initialize the standard set of capabilities */
++	/* Note that the vendor-specific code below might override */
 +
-+		start = adapter_rom_resources[i++].end & ~2047UL;
++	/* Intel-defined flags: level 0x00000001 */
++	if (c->cpuid_level >= 0x00000001) {
++		__u32 misc;
++		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
++		      &c->x86_capability[0]);
++		c->x86 = (tfms >> 8) & 0xf;
++		c->x86_model = (tfms >> 4) & 0xf;
++		c->x86_mask = tfms & 0xf;
++		if (c->x86 == 0xf)
++			c->x86 += (tfms >> 20) & 0xff;
++		if (c->x86 >= 0x6)
++			c->x86_model += ((tfms >> 16) & 0xF) << 4;
++		if (c->x86_capability[0] & (1<<19)) 
++			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
++	} else {
++		/* Have CPUID level 0 only - unheard of */
++		c->x86 = 4;
 +	}
-+}
 +
-+/* Check for full argument with no trailing characters */
-+static int fullarg(char *p, char *arg)
-+{
-+	int l = strlen(arg);
-+	return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
++#ifdef CONFIG_SMP
++	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
++#endif
 +}
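The family/model/stepping decode above deserves a worked example. For a hypothetical leaf-1 EAX of 0x000006fb (value made up for illustration), the extended-model adjustment yields family 6, model 15, stepping 11:

#include <stdio.h>

int main(void)
{
	unsigned tfms = 0x000006fb;          /* hypothetical CPUID(1) EAX */
	unsigned family = (tfms >> 8) & 0xf;
	unsigned model = (tfms >> 4) & 0xf;
	unsigned stepping = tfms & 0xf;

	if (family == 0xf)                   /* extended family field */
		family += (tfms >> 20) & 0xff;
	if (family >= 0x6)                   /* extended model field */
		model += ((tfms >> 16) & 0xf) << 4;

	printf("family %u, model %u, stepping %u\n", family, model, stepping);
	return 0;                            /* family 6, model 15, stepping 11 */
}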
 +
-+static __init void parse_cmdline_early (char ** cmdline_p)
++/*
++ * This does the hard work of actually picking apart the CPU stuff...
++ */
++void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 +{
-+	char c = ' ', *to = command_line, *from = COMMAND_LINE;
-+	int len = 0;
-+	int userdef = 0;
-+
-+	for (;;) {
-+		if (c != ' ') 
-+			goto next_char; 
++	int i;
++	u32 xlvl;
 +
-+#ifdef  CONFIG_SMP
-+		/*
-+		 * If the BIOS enumerates physical processors before logical,
-+		 * maxcpus=N at enumeration-time can be used to disable HT.
-+		 */
-+		else if (!memcmp(from, "maxcpus=", 8)) {
-+			extern unsigned int maxcpus;
++	early_identify_cpu(c);
 +
-+			maxcpus = simple_strtoul(from + 8, NULL, 0);
-+		}
-+#endif
-+#ifdef CONFIG_ACPI
-+		/* "acpi=off" disables both ACPI table parsing and interpreter init */
-+		if (fullarg(from,"acpi=off"))
-+			disable_acpi();
-+
-+		if (fullarg(from, "acpi=force")) { 
-+			/* add later when we do DMI horrors: */
-+			acpi_force = 1;
-+			acpi_disabled = 0;
-+		}
-+
-+		/* acpi=ht just means: do ACPI MADT parsing 
-+		   at bootup, but don't enable the full ACPI interpreter */
-+		if (fullarg(from, "acpi=ht")) { 
-+			if (!acpi_force)
-+				disable_acpi();
-+			acpi_ht = 1; 
-+		}
-+                else if (fullarg(from, "pci=noacpi")) 
-+			acpi_disable_pci();
-+		else if (fullarg(from, "acpi=noirq"))
-+			acpi_noirq_set();
-+
-+		else if (fullarg(from, "acpi_sci=edge"))
-+			acpi_sci_flags.trigger =  1;
-+		else if (fullarg(from, "acpi_sci=level"))
-+			acpi_sci_flags.trigger = 3;
-+		else if (fullarg(from, "acpi_sci=high"))
-+			acpi_sci_flags.polarity = 1;
-+		else if (fullarg(from, "acpi_sci=low"))
-+			acpi_sci_flags.polarity = 3;
-+
-+		/* acpi=strict disables out-of-spec workarounds */
-+		else if (fullarg(from, "acpi=strict")) {
-+			acpi_strict = 1;
++	/* AMD-defined flags: level 0x80000001 */
++	xlvl = cpuid_eax(0x80000000);
++	c->extended_cpuid_level = xlvl;
++	if ((xlvl & 0xffff0000) == 0x80000000) {
++		if (xlvl >= 0x80000001) {
++			c->x86_capability[1] = cpuid_edx(0x80000001);
++			c->x86_capability[6] = cpuid_ecx(0x80000001);
 +		}
-+#ifdef CONFIG_X86_IO_APIC
-+		else if (fullarg(from, "acpi_skip_timer_override"))
-+			acpi_skip_timer_override = 1;
-+#endif
-+#endif
++		if (xlvl >= 0x80000004)
++			get_model_name(c); /* Default name */
++	}
 +
-+#ifndef CONFIG_XEN
-+		if (fullarg(from, "disable_timer_pin_1"))
-+			disable_timer_pin_1 = 1;
-+		if (fullarg(from, "enable_timer_pin_1"))
-+			disable_timer_pin_1 = -1;
-+
-+		if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
-+			clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
-+			disable_apic = 1;
-+		}
-+
-+		if (fullarg(from, "noapic"))
-+			skip_ioapic_setup = 1;
-+
-+		if (fullarg(from,"apic")) {
-+			skip_ioapic_setup = 0;
-+			ioapic_force = 1;
-+		}
-+#endif
-+			
-+		if (!memcmp(from, "mem=", 4))
-+			parse_memopt(from+4, &from); 
++	/* Transmeta-defined flags: level 0x80860001 */
++	xlvl = cpuid_eax(0x80860000);
++	if ((xlvl & 0xffff0000) == 0x80860000) {
++		/* Don't set x86_cpuid_level here for now, to avoid confusion. */
++		if (xlvl >= 0x80860001)
++			c->x86_capability[2] = cpuid_edx(0x80860001);
++	}
 +
-+		if (!memcmp(from, "memmap=", 7)) {
-+			/* exactmap option is for used defined memory */
-+			if (!memcmp(from+7, "exactmap", 8)) {
-+#ifdef CONFIG_CRASH_DUMP
-+				/* If we are doing a crash dump, we
-+				 * still need to know the real mem
-+				 * size before original memory map is
-+				 * reset.
-+				 */
-+				saved_max_pfn = e820_end_of_ram();
-+#endif
-+				from += 8+7;
-+				end_pfn_map = 0;
-+				e820.nr_map = 0;
-+				userdef = 1;
-+			}
-+			else {
-+				parse_memmapopt(from+7, &from);
-+				userdef = 1;
-+			}
-+		}
++	c->apicid = phys_pkg_id(0);
 +
-+#ifdef CONFIG_NUMA
-+		if (!memcmp(from, "numa=", 5))
-+			numa_setup(from+5); 
-+#endif
++	/*
++	 * Vendor-specific initialization.  In this section we
++	 * canonicalize the feature flags, meaning if there are
++	 * features a certain CPU supports which CPUID doesn't
++	 * tell us, CPUID claiming incorrect flags, or other bugs,
++	 * we handle them here.
++	 *
++	 * At the end of this section, c->x86_capability better
++	 * indicate the features this CPU genuinely supports!
++	 */
++	switch (c->x86_vendor) {
++	case X86_VENDOR_AMD:
++		init_amd(c);
++		break;
 +
-+		if (!memcmp(from,"iommu=",6)) { 
-+			iommu_setup(from+6); 
-+		}
++	case X86_VENDOR_INTEL:
++		init_intel(c);
++		break;
 +
-+		if (fullarg(from,"oops=panic"))
-+			panic_on_oops = 1;
++	case X86_VENDOR_UNKNOWN:
++	default:
++		display_cacheinfo(c);
++		break;
++	}
 +
-+		if (!memcmp(from, "noexec=", 7))
-+			nonx_setup(from + 7);
++	select_idle_routine(c);
++	detect_ht(c); 
 +
-+#ifdef CONFIG_KEXEC
-+		/* crashkernel=size at addr specifies the location to reserve for
-+		 * a crash kernel.  By reserving this memory we guarantee
-+		 * that linux never set's it up as a DMA target.
-+		 * Useful for holding code to do something appropriate
-+		 * after a kernel panic.
-+		 */
-+		else if (!memcmp(from, "crashkernel=", 12)) {
-+			unsigned long size, base;
-+			size = memparse(from+12, &from);
-+			if (*from == '@') {
-+				base = memparse(from+1, &from);
-+				/* FIXME: Do I want a sanity check
-+				 * to validate the memory range?
-+				 */
-+				crashk_res.start = base;
-+				crashk_res.end   = base + size - 1;
-+			}
-+		}
-+#endif
++	/*
++	 * On SMP, boot_cpu_data holds the common feature set between
++	 * all CPUs; so make sure that we indicate which features are
++	 * common between the CPUs.  The first time this routine gets
++	 * executed, c == &boot_cpu_data.
++	 */
++	if (c != &boot_cpu_data) {
++		/* AND the already accumulated flags with these */
++		for (i = 0 ; i < NCAPINTS ; i++)
++			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++	}
 +
-+#ifdef CONFIG_PROC_VMCORE
-+		/* elfcorehdr= specifies the location of elf core header
-+		 * stored by the crashed kernel. This option will be passed
-+		 * by kexec loader to the capture kernel.
-+		 */
-+		else if(!memcmp(from, "elfcorehdr=", 11))
-+			elfcorehdr_addr = memparse(from+11, &from);
++#ifdef CONFIG_X86_MCE
++	mcheck_init(c);
 +#endif
-+
-+#if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
-+		else if (!memcmp(from, "additional_cpus=", 16))
-+			setup_additional_cpus(from+16);
++	if (c == &boot_cpu_data)
++		mtrr_bp_init();
++	else
++		mtrr_ap_init();
++#ifdef CONFIG_NUMA
++	numa_add_cpu(smp_processor_id());
 +#endif
-+
-+	next_char:
-+		c = *(from++);
-+		if (!c)
-+			break;
-+		if (COMMAND_LINE_SIZE <= ++len)
-+			break;
-+		*(to++) = c;
-+	}
-+	if (userdef) {
-+		printk(KERN_INFO "user-defined physical RAM map:\n");
-+		e820_print_map("user");
-+	}
-+	*to = '\0';
-+	*cmdline_p = command_line;
 +}
++ 
 +
-+#ifndef CONFIG_NUMA
-+static void __init
-+contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
++void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 +{
-+	unsigned long bootmap_size, bootmap;
++	if (c->x86_model_id[0])
++		printk("%s", c->x86_model_id);
 +
-+	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-+	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
-+	if (bootmap == -1L)
-+		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
-+	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
-+#ifdef CONFIG_XEN
-+	e820_bootmem_free(NODE_DATA(0), 0, xen_start_info->nr_pages<<PAGE_SHIFT);
-+#else
-+	e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
-+#endif
-+	reserve_bootmem(bootmap, bootmap_size);
-+} 
-+#endif
++	if (c->x86_mask || c->cpuid_level >= 0) 
++		printk(" stepping %02x\n", c->x86_mask);
++	else
++		printk("\n");
++}
 +
-+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-+struct edd edd;
-+#ifdef CONFIG_EDD_MODULE
-+EXPORT_SYMBOL(edd);
-+#endif
-+/**
-+ * copy_edd() - Copy the BIOS EDD information
-+ *              from boot_params into a safe place.
-+ *
++/*
++ *	Get CPU information for use by the procfs.
 + */
-+static inline void copy_edd(void)
-+{
-+     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
-+     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
-+     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
-+     edd.edd_info_nr = EDD_NR;
-+}
-+#else
-+static inline void copy_edd(void)
++
++static int show_cpuinfo(struct seq_file *m, void *v)
 +{
-+}
-+#endif
++	struct cpuinfo_x86 *c = v;
 +
-+#ifndef CONFIG_XEN
-+#define EBDA_ADDR_POINTER 0x40E
++	/* 
++	 * These flag bits must match the definitions in <asm/cpufeature.h>.
++	 * NULL means this bit is undefined or reserved; either way it doesn't
++	 * have meaning as far as Linux is concerned.  Note that it's important
++	 * to realize there is a difference between this table and CPUID -- if
++	 * applications want to get the raw CPUID data, they should access
++	 * /dev/cpu/<cpu_nr>/cpuid instead.
++	 */
++	static char *x86_cap_flags[] = {
++		/* Intel-defined */
++	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
++	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
++	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
++	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
 +
-+unsigned __initdata ebda_addr;
-+unsigned __initdata ebda_size;
++		/* AMD-defined */
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
++		NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
 +
-+static void discover_ebda(void)
-+{
-+	/*
-+	 * there is a real-mode segmented pointer pointing to the 
-+	 * 4K EBDA area at 0x40E
-+	 */
-+	ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
-+	ebda_addr <<= 4;
++		/* Transmeta-defined */
++		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 +
-+	ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
++		/* Other (Linux-defined) */
++		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
++		"constant_tsc", NULL, NULL,
++		"up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 +
-+	/* Round EBDA up to pages */
-+	if (ebda_size == 0)
-+		ebda_size = 1;
-+	ebda_size <<= 10;
-+	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
-+	if (ebda_size > 64*1024)
-+		ebda_size = 64*1024;
-+}
++		/* Intel-defined (#2) */
++		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
++		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
++		NULL, NULL, "dca", NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++		/* VIA/Cyrix/Centaur-defined */
++		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++		/* AMD-defined (#2) */
++		"lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++	};
++	static char *x86_power_flags[] = { 
++		"ts",	/* temperature sensor */
++		"fid",  /* frequency id control */
++		"vid",  /* voltage id control */
++		"ttp",  /* thermal trip */
++		"tm",
++		"stc",
++		NULL,
++		/* nothing */	/* constant_tsc - moved to flags */
++	};
++
++
++#ifdef CONFIG_SMP
++	if (!cpu_online(c-cpu_data))
++		return 0;
 +#endif
 +
-+void __init setup_arch(char **cmdline_p)
-+{
-+	struct xen_memory_map memmap;
++	seq_printf(m,"processor\t: %u\n"
++		     "vendor_id\t: %s\n"
++		     "cpu family\t: %d\n"
++		     "model\t\t: %d\n"
++		     "model name\t: %s\n",
++		     (unsigned)(c-cpu_data),
++		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
++		     c->x86,
++		     (int)c->x86_model,
++		     c->x86_model_id[0] ? c->x86_model_id : "unknown");
++	
++	if (c->x86_mask || c->cpuid_level >= 0)
++		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
++	else
++		seq_printf(m, "stepping\t: unknown\n");
++	
++	if (cpu_has(c,X86_FEATURE_TSC)) {
++		unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
++		if (!freq)
++			freq = cpu_khz;
++		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
++			     freq / 1000, (freq % 1000));
++	}
 +
-+#ifdef CONFIG_XEN
-+	/* Register a call for panic conditions. */
-+	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++	/* Cache size */
++	if (c->x86_cache_size >= 0) 
++		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
++	
++#ifdef CONFIG_SMP
++	if (smp_num_siblings * c->x86_max_cores > 1) {
++		int cpu = c - cpu_data;
++		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
++		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
++		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
++		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
++	}
++#endif	
 +
-+ 	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); 
-+ 	screen_info = SCREEN_INFO;
++	seq_printf(m,
++	        "fpu\t\t: yes\n"
++	        "fpu_exception\t: yes\n"
++	        "cpuid level\t: %d\n"
++	        "wp\t\t: yes\n"
++	        "flags\t\t:",
++		   c->cpuid_level);
 +
-+	if (is_initial_xendomain()) {
-+		/* This is drawn from a dump from vgacon:startup in
-+		 * standard Linux. */
-+		screen_info.orig_video_mode = 3;
-+		screen_info.orig_video_isVGA = 1;
-+		screen_info.orig_video_lines = 25;
-+		screen_info.orig_video_cols = 80;
-+		screen_info.orig_video_ega_bx = 3;
-+		screen_info.orig_video_points = 16;
-+		screen_info.orig_y = screen_info.orig_video_lines - 1;
-+		if (xen_start_info->console.dom0.info_size >=
-+		    sizeof(struct dom0_vga_console_info)) {
-+			const struct dom0_vga_console_info *info =
-+				(struct dom0_vga_console_info *)(
-+					(char *)xen_start_info +
-+					xen_start_info->console.dom0.info_off);
-+			dom0_init_screen_info(info);
-+		}
-+		xen_start_info->console.domU.mfn = 0;
-+		xen_start_info->console.domU.evtchn = 0;
-+	} else
-+		screen_info.orig_video_isVGA = 0;
++	{ 
++		int i; 
++		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
++			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
++				seq_printf(m, " %s", x86_cap_flags[i]);
++	}
++		
++	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
++		   c->loops_per_jiffy/(500000/HZ),
++		   (c->loops_per_jiffy/(5000/HZ)) % 100);
 +
-+	edid_info = EDID_INFO;
-+	saved_video_mode = SAVED_VIDEO_MODE;
-+	bootloader_type = LOADER_TYPE;
++	if (c->x86_tlbsize > 0) 
++		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
++	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
++	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
 +
-+#ifdef CONFIG_BLK_DEV_RAM
-+	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", 
++		   c->x86_phys_bits, c->x86_virt_bits);
++
++	seq_printf(m, "power management:");
++	{
++		unsigned i;
++		for (i = 0; i < 32; i++) 
++			if (c->x86_power & (1 << i)) {
++				if (i < ARRAY_SIZE(x86_power_flags) &&
++					x86_power_flags[i])
++					seq_printf(m, "%s%s",
++						x86_power_flags[i][0]?" ":"",
++						x86_power_flags[i]);
++				else
++					seq_printf(m, " [%d]", i);
++			}
++	}
 +
++	seq_printf(m, "\n\n");
 +
-+#endif
++	return 0;
++}
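As the comment above says, applications that want raw CPUID data should read /dev/cpu/<cpu_nr>/cpuid rather than parse /proc/cpuinfo. A minimal reader, assuming the cpuid driver is loaded and the node is readable: the file offset selects the leaf, and each read returns EAX/EBX/ECX/EDX as 16 bytes.

/* cc -o rawcpuid rawcpuid.c */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned int regs[4];            /* eax, ebx, ecx, edx */
	int fd = open("/dev/cpu/0/cpuid", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (pread(fd, regs, sizeof(regs), 1) != sizeof(regs)) { /* leaf 1 */
		perror("pread");
		close(fd);
		return 1;
	}
	printf("leaf 1: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
	       regs[0], regs[1], regs[2], regs[3]);
	close(fd);
	return 0;
}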
 +
-+	setup_xen_features();
++static void *c_start(struct seq_file *m, loff_t *pos)
++{
++	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
++}
 +
-+	HYPERVISOR_vm_assist(VMASST_CMD_enable,
-+			     VMASST_TYPE_writable_pagetables);
++static void *c_next(struct seq_file *m, void *v, loff_t *pos)
++{
++	++*pos;
++	return c_start(m, pos);
++}
 +
-+	ARCH_SETUP
-+#else
-+ 	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
-+ 	screen_info = SCREEN_INFO;
-+	edid_info = EDID_INFO;
-+	saved_video_mode = SAVED_VIDEO_MODE;
-+	bootloader_type = LOADER_TYPE;
++static void c_stop(struct seq_file *m, void *v)
++{
++}
 +
-+#ifdef CONFIG_BLK_DEV_RAM
-+	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-+#endif
-+#endif	/* !CONFIG_XEN */
-+	setup_memory_region();
-+	copy_edd();
++struct seq_operations cpuinfo_op = {
++	.start =c_start,
++	.next =	c_next,
++	.stop =	c_stop,
++	.show =	show_cpuinfo,
++};
 +
-+	if (!MOUNT_ROOT_RDONLY)
-+		root_mountflags &= ~MS_RDONLY;
-+	init_mm.start_code = (unsigned long) &_text;
-+	init_mm.end_code = (unsigned long) &_etext;
-+	init_mm.end_data = (unsigned long) &_edata;
-+	init_mm.brk = (unsigned long) &_end;
++#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
++#include <linux/platform_device.h>
++static __init int add_pcspkr(void)
++{
++	struct platform_device *pd;
++	int ret;
 +
-+	code_resource.start = virt_to_phys(&_text);
-+	code_resource.end = virt_to_phys(&_etext)-1;
-+	data_resource.start = virt_to_phys(&_etext);
-+	data_resource.end = virt_to_phys(&_edata)-1;
++	pd = platform_device_alloc("pcspkr", -1);
++	if (!pd)
++		return -ENOMEM;
 +
-+	parse_cmdline_early(cmdline_p);
++	ret = platform_device_add(pd);
++	if (ret)
++		platform_device_put(pd);
 +
-+	early_identify_cpu(&boot_cpu_data);
++	return ret;
++}
++device_initcall(add_pcspkr);
++#endif
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/smp-xen.c b/arch/x86_64/kernel/smp-xen.c
+--- a/arch/x86_64/kernel/smp-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/smp-xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,576 @@
++/*
++ *	Intel SMP support routines.
++ *
++ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
++ *	(c) 1998-99, 2000 Ingo Molnar <mingo at redhat.com>
++ *      (c) 2002,2003 Andi Kleen, SuSE Labs.
++ *
++ *	This code is released under the GNU General Public License version 2 or
++ *	later.
++ */
 +
-+	/*
-+	 * partially used pages are not usable - thus
-+	 * we are rounding upwards:
-+	 */
-+	end_pfn = e820_end_of_ram();
-+	num_physpages = end_pfn;		/* for pfn_valid */
++#include <linux/init.h>
 +
-+	check_efer();
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/smp.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/interrupt.h>
 +
-+#ifndef CONFIG_XEN
-+	discover_ebda();
++#include <asm/mtrr.h>
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
++#include <asm/mach_apic.h>
++#include <asm/mmu_context.h>
++#include <asm/proto.h>
++#include <asm/apicdef.h>
++#include <asm/idle.h>
++#ifdef CONFIG_XEN
++#include <xen/evtchn.h>
 +#endif
 +
-+	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
++#ifndef CONFIG_XEN
++/*
++ *	Smarter SMP flushing macros. 
++ *		c/o Linus Torvalds.
++ *
++ *	These mean you can really definitely utterly forget about
++ *	writing to user space from interrupts. (It's not allowed anyway).
++ *
++ *	Optimizations Manfred Spraul <manfred at colorfullife.com>
++ *
++ * 	More scalable flush, from Andi Kleen
++ *
++ * 	To avoid global state use 8 different call vectors.
++ * 	Each CPU uses a specific vector to trigger flushes on other
++ * 	CPUs. Depending on the received vector the target CPUs look into
++ *	the right per cpu variable for the flush data.
++ *
++ * 	With more than 8 CPUs they are hashed to the 8 available
++ * 	vectors. The limited global vector space forces us to this right now.
++ *	In future when interrupts are split into per CPU domains this could be
++ *	fixed, at the cost of triggering multiple IPIs in some cases.
++ */
 +
-+#ifdef CONFIG_ACPI_NUMA
-+	/*
-+	 * Parse SRAT to discover nodes.
-+	 */
-+	acpi_numa_init();
-+#endif
++union smp_flush_state {
++	struct {
++		cpumask_t flush_cpumask;
++		struct mm_struct *flush_mm;
++		unsigned long flush_va;
++#define FLUSH_ALL	-1ULL
++		spinlock_t tlbstate_lock;
++	};
++	char pad[SMP_CACHE_BYTES];
++} ____cacheline_aligned;
 +
-+#ifdef CONFIG_NUMA
-+	numa_initmem_init(0, end_pfn); 
-+#else
-+	contig_initmem_init(0, end_pfn);
++/* State is put into the per CPU data section, but padded
++   to a full cache line because other CPUs can access it and we don't
++   want false sharing in the per cpu data segment. */
++static DEFINE_PER_CPU(union smp_flush_state, flush_state);
 +#endif
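The union-with-pad trick above is the standard defence against false sharing: each per-CPU flush_state fills a whole cache line, so a CPU spinning on its own tlbstate_lock never bounces a line holding another CPU's state. The same idiom in plain C, with a 64-byte line assumed for illustration:

#include <stdio.h>

#define CACHE_LINE 64

union padded_slot {
	struct {
		long counter;            /* the actual payload */
	} s;
	char pad[CACHE_LINE];            /* forces sizeof == CACHE_LINE */
};

static union padded_slot slots[4]
		__attribute__((aligned(CACHE_LINE)));

int main(void)
{
	slots[0].s.counter++;            /* touches only slot 0's line */
	printf("slot size: %zu bytes\n", sizeof(slots[0]));
	return 0;
}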
 +
-+	/* Reserve direct mapping */
-+	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
-+				(table_end - table_start) << PAGE_SHIFT);
++/*
++ * We cannot call mmdrop() because we are in interrupt context, 
++ * instead update mm->cpu_vm_mask.
++ */
++static inline void leave_mm(int cpu)
++{
++	if (read_pda(mmu_state) == TLBSTATE_OK)
++		BUG();
++	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
++	load_cr3(swapper_pg_dir);
++}
 +
-+	/* reserve kernel */
-+	reserve_bootmem_generic(__pa_symbol(&_text),
-+				__pa_symbol(&_end) - __pa_symbol(&_text));
++#ifndef CONFIG_XEN
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
++ * 	Stop ipi delivery for the old mm. This is not synchronized with
++ * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ * 	for the wrong mm, and in the worst case we perform a superfluous
++ * 	tlb flush.
++ * 1a2) set cpu mmu_state to TLBSTATE_OK
++ * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ *	was in lazy tlb mode.
++ * 1a3) update cpu active_mm
++ * 	Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
++ * 	Now the other cpus will send tlb flush ipis.
++ * 1a4) change cr3.
++ * 1b) thread switch without mm change
++ *	cpu active_mm is correct, cpu0 already handles
++ *	flush ipis.
++ * 1b1) set cpu mmu_state to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * 	Atomically set the bit [other cpus will start sending flush ipis],
++ * 	and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%esp, ie current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ *   runs in kernel space, the cpu could load tlb entries for user space
++ *   pages.
++ *
++ * The good news is that cpu mmu_state is local to each cpu, no
++ * write/read ordering problems.
++ */
 +
-+#ifdef CONFIG_XEN
-+	/* reserve physmap, start info and initial page tables */
-+	reserve_bootmem(__pa_symbol(&_end), (table_start<<PAGE_SHIFT)-__pa_symbol(&_end));
-+#else
-+	/*
-+	 * reserve physical page 0 - it's a special BIOS page on many boxes,
-+	 * enabling clean reboots, SMP operation, laptop functions.
-+	 */
-+	reserve_bootmem_generic(0, PAGE_SIZE);
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ *
++ * Interrupts are disabled.
++ */
 +
-+	/* reserve ebda region */
-+	if (ebda_addr)
-+		reserve_bootmem_generic(ebda_addr, ebda_size);
-+#endif
++asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
++{
++	int cpu;
++	int sender;
++	union smp_flush_state *f;
 +
-+#ifdef CONFIG_SMP
++	cpu = smp_processor_id();
 +	/*
-+	 * But first pinch a few for the stack/trampoline stuff
-+	 * FIXME: Don't need the extra page at 4K, but need to fix
-+	 * trampoline before removing it. (see the GDT stuff)
++	 * orig_rax contains the interrupt vector - 256.
++	 * Use that to determine where the sender put the data.
 +	 */
-+	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
-+
-+	/* Reserve SMP trampoline */
-+	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
-+#endif
++	sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
++	f = &per_cpu(flush_state, sender);
 +
-+#ifdef CONFIG_ACPI_SLEEP
-+       /*
-+        * Reserve low memory region for sleep support.
-+        */
-+       acpi_reserve_bootmem();
-+#endif
-+#ifdef CONFIG_XEN
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	if (xen_start_info->mod_start) {
-+		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
-+			/*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
-+			initrd_start = INITRD_START + PAGE_OFFSET;
-+			initrd_end = initrd_start+INITRD_SIZE;
-+			initrd_below_start_ok = 1;
-+		} else {
-+			printk(KERN_ERR "initrd extends beyond end of memory "
-+				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+				(unsigned long)(INITRD_START + INITRD_SIZE),
-+				(unsigned long)(end_pfn << PAGE_SHIFT));
-+			initrd_start = 0;
-+		}
-+	}
-+#endif
-+#else	/* CONFIG_XEN */
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	if (LOADER_TYPE && INITRD_START) {
-+		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
-+			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
-+			initrd_start =
-+				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
-+			initrd_end = initrd_start+INITRD_SIZE;
-+		}
-+		else {
-+			printk(KERN_ERR "initrd extends beyond end of memory "
-+			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+			    (unsigned long)(INITRD_START + INITRD_SIZE),
-+			    (unsigned long)(end_pfn << PAGE_SHIFT));
-+			initrd_start = 0;
-+		}
-+	}
-+#endif
-+#endif	/* !CONFIG_XEN */
-+#ifdef CONFIG_KEXEC
-+	if (crashk_res.start != crashk_res.end) {
-+		reserve_bootmem_generic(crashk_res.start,
-+			crashk_res.end - crashk_res.start + 1);
++	if (!cpu_isset(cpu, f->flush_cpumask))
++		goto out;
++		/* 
++		 * This was a BUG() but until someone can quote me the
++		 * line from the intel manual that guarantees an IPI to
++		 * multiple CPUs is retried _only_ on the erroring CPUs
++		 * it's staying as a return
++		 *
++		 * BUG();
++		 */
++		 
++	if (f->flush_mm == read_pda(active_mm)) {
++		if (read_pda(mmu_state) == TLBSTATE_OK) {
++			if (f->flush_va == FLUSH_ALL)
++				local_flush_tlb();
++			else
++				__flush_tlb_one(f->flush_va);
++		} else
++			leave_mm(cpu);
 +	}
-+#endif
-+
-+	paging_init();
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	/*
-+	 * Find and reserve possible boot-time SMP configuration:
-+	 */
-+	find_smp_config();
-+#endif
-+#ifdef CONFIG_XEN
-+	{
-+		int i, j, k, fpp;
-+
-+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+			/* Make sure we have a large enough P->M table. */
-+			phys_to_machine_mapping = alloc_bootmem_pages(
-+				end_pfn * sizeof(unsigned long));
-+			memset(phys_to_machine_mapping, ~0,
-+			       end_pfn * sizeof(unsigned long));
-+			memcpy(phys_to_machine_mapping,
-+			       (unsigned long *)xen_start_info->mfn_list,
-+			       xen_start_info->nr_pages * sizeof(unsigned long));
-+			free_bootmem(
-+				__pa(xen_start_info->mfn_list),
-+				PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
-+						sizeof(unsigned long))));
++out:
++	ack_APIC_irq();
++	cpu_clear(cpu, f->flush_cpumask);
++}
 +
-+			/*
-+			 * Initialise the list of the frames that specify the
-+			 * list of frames that make up the p2m table. Used by
-+                         * save/restore.
-+			 */
-+			pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);
-+			HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+				virt_to_mfn(pfn_to_mfn_frame_list_list);
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++						unsigned long va)
++{
++	int sender;
++	union smp_flush_state *f;
 +
-+			fpp = PAGE_SIZE/sizeof(unsigned long);
-+			for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
-+				if ((j % fpp) == 0) {
-+					k++;
-+					BUG_ON(k>=fpp);
-+					pfn_to_mfn_frame_list[k] =
-+						alloc_bootmem_pages(PAGE_SIZE);
-+					pfn_to_mfn_frame_list_list[k] =
-+						virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+					j=0;
-+				}
-+				pfn_to_mfn_frame_list[k][j] =
-+					virt_to_mfn(&phys_to_machine_mapping[i]);
-+			}
-+			HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
-+		}
++	/* Caller has disabled preemption */
++	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
++	f = &per_cpu(flush_state, sender);
 +
-+	}
++	/* Could avoid this lock when
++	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
++	   probably not worth checking this for a cache-hot lock. */
++	spin_lock(&f->tlbstate_lock);
 +
-+	if (is_initial_xendomain())
-+		dmi_scan_machine();
++	f->flush_mm = mm;
++	f->flush_va = va;
++	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
 +
-+	if (!is_initial_xendomain()) {
-+		acpi_disabled = 1;
-+#ifdef  CONFIG_ACPI
-+		acpi_ht = 0;
-+#endif
-+	}
-+#endif
++	/*
++	 * We have to send the IPI only to
++	 * CPUs affected.
++	 */
++	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
 +
-+#ifndef CONFIG_XEN
-+	check_ioapic();
-+#endif
++	while (!cpus_empty(f->flush_cpumask))
++		cpu_relax();
 +
-+	zap_low_mappings(0);
++	f->flush_mm = NULL;
++	f->flush_va = 0;
++	spin_unlock(&f->tlbstate_lock);
++}
 +
-+	/*
-+	 * set this early, so we dont allocate cpu0
-+	 * if MADT list doesnt list BSP first
-+	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
-+	 */
-+	cpu_set(0, cpu_present_map);
-+#ifdef CONFIG_ACPI
-+	/*
-+	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
-+	 * Call this early for SRAT node setup.
-+	 */
-+	acpi_boot_table_init();
++int __cpuinit init_smp_flush(void)
++{
++	int i;
++	for_each_cpu_mask(i, cpu_possible_map) {
++		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
++	}
++	return 0;
++}
 +
-+	/*
-+	 * Read APIC and some other early information from ACPI tables.
-+	 */
-+	acpi_boot_init();
-+#endif
++core_initcall(init_smp_flush);
++	
++void flush_tlb_current_task(void)
++{
++	struct mm_struct *mm = current->mm;
++	cpumask_t cpu_mask;
 +
-+	init_cpu_to_node();
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	/*
-+	 * get boot-time SMP configuration:
-+	 */
-+	if (smp_found_config)
-+		get_smp_config();
-+#ifndef CONFIG_XEN
-+	init_apic_mappings();
-+#endif
-+#endif
-+#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
-+	prefill_possible_map();
-+#endif
++	local_flush_tlb();
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++	preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_current_task);
 +
-+	/*
-+	 * Request address space for all standard RAM and ROM resources
-+	 * and also for regions reported as reserved by the e820.
-+	 */
-+	probe_roms();
-+#ifdef CONFIG_XEN
-+	if (is_initial_xendomain()) {
-+		memmap.nr_entries = E820MAX;
-+		set_xen_guest_handle(memmap.buffer, machine_e820.map);
++void flush_tlb_mm (struct mm_struct * mm)
++{
++	cpumask_t cpu_mask;
 +
-+		if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
-+			BUG();
-+		machine_e820.nr_map = memmap.nr_entries;
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
 +
-+		e820_reserve_resources(machine_e820.map, machine_e820.nr_map);
++	if (current->active_mm == mm) {
++		if (current->mm)
++			local_flush_tlb();
++		else
++			leave_mm(smp_processor_id());
 +	}
-+#else
-+	e820_reserve_resources(e820.map, e820.nr_map);
-+#endif
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 +
-+	request_resource(&iomem_resource, &video_ram_resource);
++	preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_mm);
 +
-+	{
-+	unsigned i;
-+	/* request I/O space for devices used on all i[345]86 PCs */
-+	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
-+		request_resource(&ioport_resource, &standard_io_resources[i]);
-+	}
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{
++	struct mm_struct *mm = vma->vm_mm;
++	cpumask_t cpu_mask;
 +
-+#ifdef CONFIG_XEN
-+	if (is_initial_xendomain())
-+		e820_setup_gap(machine_e820.map, machine_e820.nr_map);
-+#else
-+	e820_setup_gap(e820.map, e820.nr_map);
-+#endif
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
 +
-+#ifdef CONFIG_XEN
-+	{
-+		struct physdev_set_iopl set_iopl;
++	if (current->active_mm == mm) {
++		if(current->mm)
++			__flush_tlb_one(va);
++		 else
++		 	leave_mm(smp_processor_id());
++	}
 +
-+		set_iopl.iopl = 1;
-+		HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, va);
 +
-+		if (is_initial_xendomain()) {
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+			conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+			conswitchp = &dummy_con;
-+#endif
-+#endif
-+		} else {
-+			extern int console_use_vt;
-+			console_use_vt = 0;
-+		}
-+	}
-+#else	/* CONFIG_XEN */
++	preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_page);
 +
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+	conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+	conswitchp = &dummy_con;
-+#endif
-+#endif
++static void do_flush_tlb_all(void* info)
++{
++	unsigned long cpu = smp_processor_id();
 +
-+#endif /* !CONFIG_XEN */
++	__flush_tlb_all();
++	if (read_pda(mmu_state) == TLBSTATE_LAZY)
++		leave_mm(cpu);
 +}
 +
-+#ifdef CONFIG_XEN
-+static int
-+xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++void flush_tlb_all(void)
 +{
-+	HYPERVISOR_shutdown(SHUTDOWN_crash);
-+	/* we're never actually going to get here... */
-+	return NOTIFY_DONE;
++	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
 +}
-+#endif /* !CONFIG_XEN */
++#else
++asmlinkage void smp_invalidate_interrupt (void)
++{ return; }
++void flush_tlb_current_task(void)
++{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
++void flush_tlb_mm (struct mm_struct * mm)
++{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
++void flush_tlb_all(void)
++{ xen_tlb_flush_all(); }
++#endif /* Xen */
 +
++/*
++ * this function sends a 'reschedule' IPI to another CPU.
++ * it goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule ...
++ */
 +
-+static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
++void smp_send_reschedule(int cpu)
 +{
-+	unsigned int *v;
++	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++}
 +
-+	if (c->extended_cpuid_level < 0x80000004)
-+		return 0;
++/*
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
++ */
++static DEFINE_SPINLOCK(call_lock);
 +
-+	v = (unsigned int *) c->x86_model_id;
-+	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
-+	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
-+	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
-+	c->x86_model_id[48] = 0;
-+	return 1;
++struct call_data_struct {
++	void (*func) (void *info);
++	void *info;
++	atomic_t started;
++	atomic_t finished;
++	int wait;
++};
++
++static struct call_data_struct * call_data;
++
++void lock_ipi_call_lock(void)
++{
++	spin_lock_irq(&call_lock);
 +}
 +
++void unlock_ipi_call_lock(void)
++{
++	spin_unlock_irq(&call_lock);
++}
 +
-+static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
++/*
++ * this function sends a 'generic call function' IPI to one other CPU
++ * in the system.
++ *
++ * cpu is a standard Linux logical CPU number.
++ */
++static void
++__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
++				int nonatomic, int wait)
 +{
-+	unsigned int n, dummy, eax, ebx, ecx, edx;
++	struct call_data_struct data;
++	int cpus = 1;
 +
-+	n = c->extended_cpuid_level;
++	data.func = func;
++	data.info = info;
++	atomic_set(&data.started, 0);
++	data.wait = wait;
++	if (wait)
++		atomic_set(&data.finished, 0);
 +
-+	if (n >= 0x80000005) {
-+		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-+			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-+		c->x86_cache_size=(ecx>>24)+(edx>>24);
-+		/* On K8 L1 TLB is inclusive, so don't count it */
-+		c->x86_tlbsize = 0;
-+	}
++	call_data = &data;
++	wmb();
++	/* Send a message to all other CPUs and wait for them to respond */
++	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
 +
-+	if (n >= 0x80000006) {
-+		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
-+		ecx = cpuid_ecx(0x80000006);
-+		c->x86_cache_size = ecx >> 16;
-+		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
++	/* Wait for response */
++	while (atomic_read(&data.started) != cpus)
++		cpu_relax();
 +
-+		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-+		c->x86_cache_size, ecx & 0xFF);
-+	}
++	if (!wait)
++		return;
 +
-+	if (n >= 0x80000007)
-+		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); 
-+	if (n >= 0x80000008) {
-+		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); 
-+		c->x86_virt_bits = (eax >> 8) & 0xff;
-+		c->x86_phys_bits = eax & 0xff;
-+	}
++	while (atomic_read(&data.finished) != cpus)
++		cpu_relax();
 +}
 +
-+#ifdef CONFIG_NUMA
-+static int nearby_node(int apicid)
++/*
++ * smp_call_function_single - Run a function on another CPU
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: Currently unused.
++ * @wait: If true, wait until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code.
++ *
++ * Does not return until the remote CPU is nearly ready to execute <func>,
++ * is executing it, or has already executed it.
++ */
++
++int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
++	int nonatomic, int wait)
 +{
-+	int i;
-+	for (i = apicid - 1; i >= 0; i--) {
-+		int node = apicid_to_node[i];
-+		if (node != NUMA_NO_NODE && node_online(node))
-+			return node;
-+	}
-+	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-+		int node = apicid_to_node[i];
-+		if (node != NUMA_NO_NODE && node_online(node))
-+			return node;
++	/* prevent preemption and reschedule on another processor */
++	int me = get_cpu();
++	if (cpu == me) {
++		put_cpu();
++		return 0;
 +	}
-+	return first_node(node_online_map); /* Shouldn't happen */
++
++	/* Can deadlock when called with interrupts disabled */
++	WARN_ON(irqs_disabled());
++
++	spin_lock_bh(&call_lock);
++	__smp_call_function_single(cpu, func, info, nonatomic, wait);
++	spin_unlock_bh(&call_lock);
++	put_cpu();
++	return 0;
 +}
-+#endif
++EXPORT_SYMBOL(smp_call_function_single);
 +
 +/*
-+ * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
-+ * Assumes number of cores is a power of two.
++ * this function sends a 'generic call function' IPI to all other CPUs
++ * in the system.
 + */
-+static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
++static void __smp_call_function (void (*func) (void *info), void *info,
++				int nonatomic, int wait)
 +{
-+#ifdef CONFIG_SMP
-+	unsigned bits;
-+#ifdef CONFIG_NUMA
-+	int cpu = smp_processor_id();
-+	int node = 0;
-+	unsigned apicid = hard_smp_processor_id();
++	struct call_data_struct data;
++	int cpus = num_online_cpus()-1;
++
++	if (!cpus)
++		return;
++
++	data.func = func;
++	data.info = info;
++	atomic_set(&data.started, 0);
++	data.wait = wait;
++	if (wait)
++		atomic_set(&data.finished, 0);
++
++	call_data = &data;
++	wmb();
++	/* Send a message to all other CPUs and wait for them to respond */
++	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++	/* Wait for response */
++	while (atomic_read(&data.started) != cpus)
++#ifndef CONFIG_XEN
++		cpu_relax();
++#else
++		barrier();
 +#endif
-+	unsigned ecx = cpuid_ecx(0x80000008);
 +
-+	c->x86_max_cores = (ecx & 0xff) + 1;
++	if (!wait)
++		return;
 +
-+	/* CPU telling us the core id bits shift? */
-+	bits = (ecx >> 12) & 0xF;
++	while (atomic_read(&data.finished) != cpus)
++#ifndef CONFIG_XEN
++		cpu_relax();
++#else
++		barrier();
++#endif
++}
 +
-+	/* Otherwise recompute */
-+	if (bits == 0) {
-+		while ((1 << bits) < c->x86_max_cores)
-+			bits++;
-+	}
++/*
++ * smp_call_function - run a function on all other CPUs.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: currently unused.
++ * @wait: If true, wait (atomically) until function has completed on other
++ *        CPUs.
++ *
++ * Returns 0 on success, else a negative status code. Does not return until
++ * remote CPUs are nearly ready to execute func or are or have executed.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ * Actually there are a few legal cases, like panic.
++ */
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++			int wait)
++{
++	spin_lock(&call_lock);
++	__smp_call_function(func,info,nonatomic,wait);
++	spin_unlock(&call_lock);
++	return 0;
++}
++EXPORT_SYMBOL(smp_call_function);
 +
-+	/* Low order bits define the core id (index of core in socket) */
-+	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
-+	/* Convert the APIC ID into the socket ID */
-+	c->phys_proc_id = phys_pkg_id(bits);
++void smp_stop_cpu(void)
++{
++	unsigned long flags;
++	/*
++	 * Remove this CPU:
++	 */
++	cpu_clear(smp_processor_id(), cpu_online_map);
++	local_irq_save(flags);
++#ifndef CONFIG_XEN
++	disable_local_APIC();
++#endif
++	local_irq_restore(flags);
++}
 +
-+#ifdef CONFIG_NUMA
-+  	node = c->phys_proc_id;
-+ 	if (apicid_to_node[apicid] != NUMA_NO_NODE)
-+ 		node = apicid_to_node[apicid];
-+ 	if (!node_online(node)) {
-+ 		/* Two possibilities here:
-+ 		   - The CPU is missing memory and no node was created.
-+ 		   In that case try picking one from a nearby CPU
-+ 		   - The APIC IDs differ from the HyperTransport node IDs
-+ 		   which the K8 northbridge parsing fills in.
-+ 		   Assume they are all increased by a constant offset,
-+ 		   but in the same order as the HT nodeids.
-+ 		   If that doesn't result in a usable node fall back to the
-+ 		   path for the previous case.  */
-+ 		int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
-+ 		if (ht_nodeid >= 0 &&
-+ 		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-+ 			node = apicid_to_node[ht_nodeid];
-+ 		/* Pick a nearby node */
-+ 		if (!node_online(node))
-+ 			node = nearby_node(apicid);
-+ 	}
-+	numa_set_node(cpu, node);
++static void smp_really_stop_cpu(void *dummy)
++{
++	smp_stop_cpu(); 
++	for (;;) 
++		halt();
++} 
 +
-+	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++void smp_send_stop(void)
++{
++	int nolock = 0;
++#ifndef CONFIG_XEN
++	if (reboot_force)
++		return;
 +#endif
++	/* Don't deadlock on the call lock in panic */
++	if (!spin_trylock(&call_lock)) {
++		/* ignore locking because we have panicked anyways */
++		nolock = 1;
++	}
++	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
++	if (!nolock)
++		spin_unlock(&call_lock);
++
++	local_irq_disable();
++#ifndef CONFIG_XEN
++	disable_local_APIC();
 +#endif
++	local_irq_enable();
 +}
 +
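The spin_trylock() dance in smp_send_stop() above is a panic-path idiom: a wedged CPU may hold call_lock forever, so the stop IPI is sent with or without the lock, and the lock is released only if it was actually taken. A minimal user-space sketch of the same idiom (emergency_stop() and halt_everyone() are made-up stand-ins, not names from the patch):

    /* Sketch of the panic-path locking idiom, in user space. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;

    static void halt_everyone(void) { puts("stopping all CPUs"); }

    static void emergency_stop(void)
    {
        /* Don't deadlock if a wedged thread already holds the lock;
         * we are dying anyway, so proceed without it. */
        int nolock = (pthread_mutex_trylock(&call_lock) != 0);

        halt_everyone();
        if (!nolock)
            pthread_mutex_unlock(&call_lock); /* release only what we took */
    }

    int main(void)
    {
        emergency_stop();
        return 0;
    }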
-+static void __init init_amd(struct cpuinfo_x86 *c)
++/*
++ * Reschedule call back. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++#ifndef CONFIG_XEN
++asmlinkage void smp_reschedule_interrupt(void)
++#else
++asmlinkage irqreturn_t smp_reschedule_interrupt(void)
++#endif
 +{
-+	unsigned level;
++#ifndef CONFIG_XEN
++	ack_APIC_irq();
++#else
++	return IRQ_HANDLED;
++#endif
++}
 +
-+#ifdef CONFIG_SMP
-+	unsigned long value;
++#ifndef CONFIG_XEN
++asmlinkage void smp_call_function_interrupt(void)
++#else
++asmlinkage irqreturn_t smp_call_function_interrupt(void)
++#endif
++{
++	void (*func) (void *info) = call_data->func;
++	void *info = call_data->info;
++	int wait = call_data->wait;
 +
++#ifndef CONFIG_XEN
++	ack_APIC_irq();
++#endif
 +	/*
-+	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
-+	 * bit 6 of msr C001_0015
-+ 	 *
-+	 * Errata 63 for SH-B3 steppings
-+	 * Errata 122 for all steppings (F+ have it disabled by default)
++	 * Notify initiating CPU that I've grabbed the data and am
++	 * about to execute the function
 +	 */
-+	if (c->x86 == 15) {
-+		rdmsrl(MSR_K8_HWCR, value);
-+		value |= 1 << 6;
-+		wrmsrl(MSR_K8_HWCR, value);
++	mb();
++	atomic_inc(&call_data->started);
++	/*
++	 * At this point the info structure may be out of scope unless wait==1
++	 */
++	exit_idle();
++	irq_enter();
++	(*func)(info);
++	irq_exit();
++	if (wait) {
++		mb();
++		atomic_inc(&call_data->finished);
 +	}
++#ifdef CONFIG_XEN
++	return IRQ_HANDLED;
 +#endif
++}
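Taken together, the sender paths above and this interrupt handler form a simple rendezvous: the sender publishes call_data and spins on the started counter; each receiver bumps started once it has safely copied func/info, runs the function, and (if asked to) bumps finished so the sender knows its stack-allocated data may go out of scope. A minimal user-space model of that handshake, with pthreads standing in for CPUs and C11 seq_cst atomics standing in for the patch's wmb()/mb(); none of these names come from the patch:

    /* Illustrative model of the call_data rendezvous, not the patch's code. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct call_data {
        void (*func)(void *info);
        void *info;
        atomic_int started;
        atomic_int finished;
        int wait;
    };

    static struct call_data *call_data;    /* published by the sender */

    static void *responder(void *unused)
    {
        struct call_data *d = call_data;

        (void)unused;
        atomic_fetch_add(&d->started, 1);  /* "I've grabbed the data" */
        d->func(d->info);
        if (d->wait)
            atomic_fetch_add(&d->finished, 1);
        return NULL;
    }

    static void say_hello(void *info)
    {
        printf("hello from %s\n", (const char *)info);
    }

    int main(void)
    {
        struct call_data d = { say_hello, (void *)"the responder", 0, 0, 1 };
        pthread_t t;

        call_data = &d;                        /* like call_data = &data; wmb(); */
        pthread_create(&t, NULL, responder, NULL);
        while (atomic_load(&d.started) != 1)   /* sender's "started" spin */
            ;
        while (atomic_load(&d.finished) != 1)  /* d (on our stack) now safe */
            ;
        pthread_join(&t, NULL);
        return 0;
    }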
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/time-xen.c b/arch/x86_64/kernel/time-xen.c
+--- a/arch/x86_64/kernel/time-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/time-xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,1057 @@
++/*
++ *  linux/arch/i386/kernel/time.c
++ *
++ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
++ *
++ * This file contains the PC-specific time handling details:
++ * reading the RTC at bootup, etc..
++ * 1994-07-02    Alan Modra
++ *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
++ * 1995-03-26    Markus Kuhn
++ *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
++ *      precision CMOS clock update
++ * 1996-05-03    Ingo Molnar
++ *      fixed time warps in do_[slow|fast]_gettimeoffset()
++ * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
++ *		"A Kernel Model for Precision Timekeeping" by Dave Mills
++ * 1998-09-05    (Various)
++ *	More robust do_fast_gettimeoffset() algorithm implemented
++ *	(works with APM, Cyrix 6x86MX and Centaur C6),
++ *	monotonic gettimeofday() with fast_get_timeoffset(),
++ *	drift-proof precision TSC calibration on boot
++ *	(C. Scott Ananian <cananian at alumni.princeton.edu>, Andrew D.
++ *	Balsa <andrebalsa at altern.org>, Philip Gladstone <philip at raptor.com>;
++ *	ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause at tu-harburg.de>).
++ * 1998-12-16    Andrea Arcangeli
++ *	Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
++ *	because was not accounting lost_ticks.
++ * 1998-12-24 Copyright (C) 1998  Andrea Arcangeli
++ *	Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
++ *	serialize accesses to xtime/lost_ticks).
++ */
 +
-+	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-+	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-+	clear_bit(0*32+31, &c->x86_capability);
-+	
-+	/* On C+ stepping K8 rep microcode works well for copy/memset */
-+	level = cpuid_eax(1);
-+	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
-+		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/param.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/time.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++#include <linux/bcd.h>
++#include <linux/efi.h>
++#include <linux/mca.h>
++#include <linux/sysctl.h>
++#include <linux/percpu.h>
++#include <linux/kernel_stat.h>
++#include <linux/posix-timers.h>
 +
-+	/* Enable workaround for FXSAVE leak */
-+	if (c->x86 >= 6)
-+		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/irq.h>
++#include <asm/msr.h>
++#include <asm/delay.h>
++#include <asm/mpspec.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/timer.h>
++#include <asm/sections.h>
 +
-+	level = get_model_name(c);
-+	if (!level) { 
-+		switch (c->x86) { 
-+		case 15:
-+			/* Should distinguish Models here, but this is only
-+			   a fallback anyways. */
-+			strcpy(c->x86_model_id, "Hammer");
-+			break; 
-+		} 
-+	} 
-+	display_cacheinfo(c);
++#include "mach_time.h"
 +
-+	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-+	if (c->x86_power & (1<<8))
-+		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++#include <linux/timex.h>
 +
-+	/* Multi core CPU? */
-+	if (c->extended_cpuid_level >= 0x80000008)
-+		amd_detect_cmp(c);
++#include <asm/hpet.h>
 +
-+	/* Fix cpuid4 emulation for more */
-+	num_cache_leaves = 3;
-+}
++#include <asm/arch_hooks.h>
 +
-+static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-+{
-+#ifdef CONFIG_SMP
-+	u32 	eax, ebx, ecx, edx;
-+	int 	index_msb, core_bits;
-+	int 	cpu = smp_processor_id();
++#include <xen/evtchn.h>
++#include <xen/interface/vcpu.h>
 +
-+	cpuid(1, &eax, &ebx, &ecx, &edx);
++int pit_latch_buggy;              /* extern */
++
++unsigned long vxtime_hz = PIT_TICK_RATE;
++struct vxtime_data __vxtime __section_vxtime;   /* for vsyscalls */
++volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
++struct timespec __xtime __section_xtime;
++struct timezone __sys_tz __section_sys_tz;
++
++#define USEC_PER_TICK (USEC_PER_SEC / HZ)
++#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
++#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
 +
++#define NS_SCALE	10 /* 2^10, carefully chosen */
++#define US_SCALE	32 /* 2^32, arbitrarily chosen */
 +
-+	if (!cpu_has(c, X86_FEATURE_HT))
-+		return;
-+ 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
-+		goto out;
++unsigned int cpu_khz;	/* Detected as we calibrate the TSC */
++EXPORT_SYMBOL(cpu_khz);
 +
-+	smp_num_siblings = (ebx & 0xff0000) >> 16;
++DEFINE_SPINLOCK(rtc_lock);
++EXPORT_SYMBOL(rtc_lock);
 +
-+	if (smp_num_siblings == 1) {
-+		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-+	} else if (smp_num_siblings > 1 ) {
++extern struct init_timer_opts timer_tsc_init;
++extern struct timer_opts timer_tsc;
++#define timer_none timer_tsc
 +
-+		if (smp_num_siblings > NR_CPUS) {
-+			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
-+			smp_num_siblings = 1;
-+			return;
-+		}
++/* These are periodically updated in shared_info, and then copied here. */
++struct shadow_time_info {
++	u64 tsc_timestamp;     /* TSC at last update of time vals.  */
++	u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
++	u32 tsc_to_nsec_mul;
++	u32 tsc_to_usec_mul;
++	int tsc_shift;
++	u32 version;
++};
++static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
++static struct timespec shadow_tv;
++static u32 shadow_tv_version;
 +
-+		index_msb = get_count_order(smp_num_siblings);
-+		c->phys_proc_id = phys_pkg_id(index_msb);
++/* Keep track of last time we did processing/updating of jiffies and xtime. */
++static u64 processed_system_time;   /* System time (ns) at last processing. */
++static DEFINE_PER_CPU(u64, processed_system_time);
 +
-+		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
++/* How much CPU time was spent blocked and how much was 'stolen'? */
++static DEFINE_PER_CPU(u64, processed_stolen_time);
++static DEFINE_PER_CPU(u64, processed_blocked_time);
 +
-+		index_msb = get_count_order(smp_num_siblings) ;
++/* Current runstate of each CPU (updated automatically by the hypervisor). */
++static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
 +
-+		core_bits = get_count_order(c->x86_max_cores);
++/* Must be signed, as it's compared with s64 quantities which can be -ve. */
++#define NS_PER_TICK (1000000000LL/HZ)
 +
-+		c->cpu_core_id = phys_pkg_id(index_msb) &
-+					       ((1 << core_bits) - 1);
++static inline void __normalize_time(time_t *sec, s64 *nsec)
++{
++	while (*nsec >= NSEC_PER_SEC) {
++		(*nsec) -= NSEC_PER_SEC;
++		(*sec)++;
 +	}
-+out:
-+	if ((c->x86_max_cores * smp_num_siblings) > 1) {
-+		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
-+		printk(KERN_INFO  "CPU: Processor Core ID: %d\n", c->cpu_core_id);
++	while (*nsec < 0) {
++		(*nsec) += NSEC_PER_SEC;
++		(*sec)--;
 +	}
++}
++
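__normalize_time() simply carries excess nanoseconds into the seconds field (and borrows for negative values), keeping nsec in [0, 10^9). A quick self-contained check of that behaviour; the helper below mirrors the loop above with illustrative types:

    #include <assert.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000LL

    static void normalize_time(time_t *sec, long long *nsec)
    {
        while (*nsec >= NSEC_PER_SEC) { *nsec -= NSEC_PER_SEC; (*sec)++; }
        while (*nsec < 0)             { *nsec += NSEC_PER_SEC; (*sec)--; }
    }

    int main(void)
    {
        time_t s = 5;
        long long ns = 2300000000LL;   /* 5 s + 2.3 s */

        normalize_time(&s, &ns);
        assert(s == 7 && ns == 300000000LL);

        s = 5; ns = -1;                /* borrow one second */
        normalize_time(&s, &ns);
        assert(s == 4 && ns == NSEC_PER_SEC - 1);
        return 0;
    }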
++/* Does this guest OS track Xen time, or set its wall clock independently? */
++static int independent_wallclock = 0;
++static int __init __independent_wallclock(char *str)
++{
++	independent_wallclock = 1;
++	return 1;
++}
++__setup("independent_wallclock", __independent_wallclock);
++
++/* Permitted clock jitter, in nsecs, beyond which a warning will be printed. */
++static unsigned long permitted_clock_jitter = 10000000UL; /* 10ms */
++static int __init __permitted_clock_jitter(char *str)
++{
++	permitted_clock_jitter = simple_strtoul(str, NULL, 0);
++	return 1;
++}
++__setup("permitted_clock_jitter=", __permitted_clock_jitter);
++
++#ifndef CONFIG_X86
++int tsc_disable __devinitdata = 0;
 +#endif
++
++static void delay_tsc(unsigned long loops)
++{
++	unsigned long bclock, now;
++
++	rdtscl(bclock);
++	do {
++		rep_nop();
++		rdtscl(now);
++	} while ((now - bclock) < loops);
 +}
 +
++struct timer_opts timer_tsc = {
++	.name = "tsc",
++	.delay = delay_tsc,
++};
++
 +/*
-+ * find out the number of processor cores on the die
++ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
++ * yielding a 64-bit result.
 + */
-+static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
++static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
 +{
-+	unsigned int eax, t;
++	u64 product;
 +
-+	if (c->cpuid_level < 4)
-+		return 1;
++	if (shift < 0)
++		delta >>= -shift;
++	else
++		delta <<= shift;
 +
-+	cpuid_count(4, 0, &eax, &t, &t, &t);
++	__asm__ (
++		"mul %%rdx ; shrd $32,%%rdx,%%rax"
++		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
 +
-+	if (eax & 0x1f)
-+		return ((eax >> 26) + 1);
-+	else
-+		return 1;
++	return product;
 +}
 +
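The inline asm in scale_delta() is a 64x64-bit multiply whose 128-bit product is shifted right by 32: after the shift adjustment, delta is scaled by the fixed-point fraction mul_frac/2^32. On compilers that provide unsigned __int128 the same computation can be written portably; a sketch for illustration, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    /* Portable restatement of scale_delta(): keep bits 32..95 of the
     * 128-bit product delta * mul_frac. */
    static uint64_t scale_delta_portable(uint64_t delta, uint32_t mul_frac,
                                         int shift)
    {
        if (shift < 0)
            delta >>= -shift;
        else
            delta <<= shift;
        return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
    }

    int main(void)
    {
        /* mul_frac = 2^31 encodes the fraction 0.5, so 1000 cycles
         * scale to 500 ns; a real mul_frac would come from Xen. */
        assert(scale_delta_portable(1000, 1u << 31, 0) == 500);
        return 0;
    }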
-+static void srat_detect_node(void)
++void init_cpu_khz(void)
 +{
-+#ifdef CONFIG_NUMA
-+	unsigned node;
-+	int cpu = smp_processor_id();
-+	int apicid = hard_smp_processor_id();
++	u64 __cpu_khz = 1000000ULL << US_SCALE;
++	struct vcpu_time_info *info;
++	info = &HYPERVISOR_shared_info->vcpu_info[0].time;
++	do_div(__cpu_khz, info->tsc_to_system_mul);
++	if (info->tsc_shift < 0)
++		cpu_khz = __cpu_khz << -info->tsc_shift;
++	else
++		cpu_khz = __cpu_khz >> info->tsc_shift;
++}
 +
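init_cpu_khz() inverts that relation: Xen defines system_ns = (tsc_delta << tsc_shift) * mul / 2^32, so cpu_khz, i.e. cycles per millisecond, is 10^6 * 2^32 / mul, corrected by the shift. A worked example of the arithmetic with illustrative values (the mul/shift pair below is made up, not read from Xen):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mul = 1u << 31;   /* "0.5 ns per cycle", i.e. 2 GHz */
        int shift = 0;
        uint64_t khz = (1000000ULL << 32) / mul;

        if (shift < 0)
            khz <<= -shift;
        else
            khz >>= shift;
        printf("cpu_khz = %llu\n", (unsigned long long)khz); /* 2000000 */
        return 0;
    }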
-+	/* Don't do the funky fallback heuristics the AMD version employs
-+	   for now. */
-+	node = apicid_to_node[apicid];
-+	if (node == NUMA_NO_NODE)
-+		node = first_node(node_online_map);
-+	numa_set_node(cpu, node);
++static u64 get_nsec_offset(struct shadow_time_info *shadow)
++{
++	u64 now, delta;
++	rdtscll(now);
++	delta = now - shadow->tsc_timestamp;
++	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
++}
 +
-+	if (acpi_numa > 0)
-+		printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-+#endif
++static unsigned long get_usec_offset(struct shadow_time_info *shadow)
++{
++	u64 now, delta;
++	rdtscll(now);
++	delta = now - shadow->tsc_timestamp;
++	return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
 +}
 +
-+static void __cpuinit init_intel(struct cpuinfo_x86 *c)
++static void __update_wallclock(time_t sec, long nsec)
 +{
-+	/* Cache sizes */
-+	unsigned n;
++	long wtm_nsec, xtime_nsec;
++	time_t wtm_sec, xtime_sec;
++	u64 tmp, wc_nsec;
 +
-+	init_intel_cacheinfo(c);
-+	if (c->cpuid_level > 9 ) {
-+		unsigned eax = cpuid_eax(10);
-+		/* Check for version and the number of counters */
-+		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-+			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
-+	}
++	/* Adjust wall-clock time base based on jiffies ticks. */
++	wc_nsec = processed_system_time;
++	wc_nsec += sec * (u64)NSEC_PER_SEC;
++	wc_nsec += nsec;
 +
-+	n = c->extended_cpuid_level;
-+	if (n >= 0x80000008) {
-+		unsigned eax = cpuid_eax(0x80000008);
-+		c->x86_virt_bits = (eax >> 8) & 0xff;
-+		c->x86_phys_bits = eax & 0xff;
-+		/* CPUID workaround for Intel 0F34 CPU */
-+		if (c->x86_vendor == X86_VENDOR_INTEL &&
-+		    c->x86 == 0xF && c->x86_model == 0x3 &&
-+		    c->x86_mask == 0x4)
-+			c->x86_phys_bits = 36;
-+	}
++	/* Split wallclock base into seconds and nanoseconds. */
++	tmp = wc_nsec;
++	xtime_nsec = do_div(tmp, 1000000000);
++	xtime_sec  = (time_t)tmp;
 +
-+	if (c->x86 == 15)
-+		c->x86_cache_alignment = c->x86_clflush_size * 2;
-+	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-+	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-+		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-+	set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-+ 	c->x86_max_cores = intel_num_cpu_cores(c);
++	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
++	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
 +
-+	srat_detect_node();
++	set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
++	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++
++	ntp_clear();
 +}
 +
-+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
++static void update_wallclock(void)
 +{
-+	char *v = c->x86_vendor_id;
++	shared_info_t *s = HYPERVISOR_shared_info;
 +
-+	if (!strcmp(v, "AuthenticAMD"))
-+		c->x86_vendor = X86_VENDOR_AMD;
-+	else if (!strcmp(v, "GenuineIntel"))
-+		c->x86_vendor = X86_VENDOR_INTEL;
-+	else
-+		c->x86_vendor = X86_VENDOR_UNKNOWN;
++	do {
++		shadow_tv_version = s->wc_version;
++		rmb();
++		shadow_tv.tv_sec  = s->wc_sec;
++		shadow_tv.tv_nsec = s->wc_nsec;
++		rmb();
++	} while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
++
++	if (!independent_wallclock)
++		__update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
 +}
 +
-+struct cpu_model_info {
-+	int vendor;
-+	int family;
-+	char *model_names[16];
-+};
++/*
++ * Reads a consistent set of time-base values from Xen, into a shadow data
++ * area.
++ */
++static void get_time_values_from_xen(void)
++{
++	shared_info_t           *s = HYPERVISOR_shared_info;
++	struct vcpu_time_info   *src;
++	struct shadow_time_info *dst;
 +
-+/* Do some early cpuid on the boot CPU to get some parameter that are
-+   needed before check_bugs. Everything advanced is in identify_cpu
-+   below. */
-+void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
++	src = &s->vcpu_info[smp_processor_id()].time;
++	dst = &per_cpu(shadow_time, smp_processor_id());
++
++	do {
++		dst->version = src->version;
++		rmb();
++		dst->tsc_timestamp     = src->tsc_timestamp;
++		dst->system_timestamp  = src->system_time;
++		dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
++		dst->tsc_shift         = src->tsc_shift;
++		rmb();
++	} while ((src->version & 1) | (dst->version ^ src->version));
++
++	dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
++}
++
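Both update_wallclock() above and get_time_values_from_xen() follow the same lockless discipline: the hypervisor makes the version field odd while an update is in flight and bumps it back to even when done, so a reader retries whenever it saw an odd or changed version. A standalone model of that seqcount-style protocol, using C11 seq_cst atomics for simplicity where the patch relies on rmb(); an illustrative sketch only:

    #include <stdatomic.h>
    #include <stdint.h>

    struct time_snapshot {
        atomic_uint version;
        _Atomic uint64_t tsc_timestamp;
        _Atomic uint64_t system_time;
    };

    static void publish(struct time_snapshot *t, uint64_t tsc, uint64_t ns)
    {
        atomic_fetch_add(&t->version, 1);   /* odd: update in progress */
        atomic_store(&t->tsc_timestamp, tsc);
        atomic_store(&t->system_time, ns);
        atomic_fetch_add(&t->version, 1);   /* even: update complete */
    }

    static void snapshot(struct time_snapshot *t, uint64_t *tsc, uint64_t *ns)
    {
        unsigned int v;

        do {
            v = atomic_load(&t->version);
            *tsc = atomic_load(&t->tsc_timestamp);
            *ns  = atomic_load(&t->system_time);
        } while ((v & 1) || v != atomic_load(&t->version));
    }

The retry condition is the same `(version & 1) | (old ^ new)` test used in the patch: an odd version means a concurrent update, a changed version means the snapshot straddled one.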
++static inline int time_values_up_to_date(int cpu)
 +{
-+	u32 tfms;
++	struct vcpu_time_info   *src;
++	struct shadow_time_info *dst;
 +
-+	c->loops_per_jiffy = loops_per_jiffy;
-+	c->x86_cache_size = -1;
-+	c->x86_vendor = X86_VENDOR_UNKNOWN;
-+	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
-+	c->x86_vendor_id[0] = '\0'; /* Unset */
-+	c->x86_model_id[0] = '\0';  /* Unset */
-+	c->x86_clflush_size = 64;
-+	c->x86_cache_alignment = c->x86_clflush_size;
-+	c->x86_max_cores = 1;
-+	c->extended_cpuid_level = 0;
-+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
++	src = &HYPERVISOR_shared_info->vcpu_info[cpu].time;
++	dst = &per_cpu(shadow_time, cpu);
 +
-+	/* Get vendor name */
-+	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-+	      (unsigned int *)&c->x86_vendor_id[0],
-+	      (unsigned int *)&c->x86_vendor_id[8],
-+	      (unsigned int *)&c->x86_vendor_id[4]);
-+		
-+	get_cpu_vendor(c);
++	rmb();
++	return (dst->version == src->version);
++}
 +
-+	/* Initialize the standard set of capabilities */
-+	/* Note that the vendor-specific code below might override */
++/*
++ * This is a special lock that is owned by the CPU and holds the index
++ * register we are working with.  It is required for NMI access to the
++ * CMOS/RTC registers.  See include/asm-i386/mc146818rtc.h for details.
++ */
++volatile unsigned long cmos_lock = 0;
++EXPORT_SYMBOL(cmos_lock);
 +
-+	/* Intel-defined flags: level 0x00000001 */
-+	if (c->cpuid_level >= 0x00000001) {
-+		__u32 misc;
-+		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
-+		      &c->x86_capability[0]);
-+		c->x86 = (tfms >> 8) & 0xf;
-+		c->x86_model = (tfms >> 4) & 0xf;
-+		c->x86_mask = tfms & 0xf;
-+		if (c->x86 == 0xf)
-+			c->x86 += (tfms >> 20) & 0xff;
-+		if (c->x86 >= 0x6)
-+			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+		if (c->x86_capability[0] & (1<<19)) 
-+			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
-+	} else {
-+		/* Have CPUID level 0 only - unheard of */
-+		c->x86 = 4;
-+	}
++/* Routines for accessing the CMOS RAM/RTC. */
++unsigned char rtc_cmos_read(unsigned char addr)
++{
++	unsigned char val;
++	lock_cmos_prefix(addr);
++	outb_p(addr, RTC_PORT(0));
++	val = inb_p(RTC_PORT(1));
++	lock_cmos_suffix(addr);
++	return val;
++}
++EXPORT_SYMBOL(rtc_cmos_read);
 +
-+#ifdef CONFIG_SMP
-+	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
-+#endif
++void rtc_cmos_write(unsigned char val, unsigned char addr)
++{
++	lock_cmos_prefix(addr);
++	outb_p(addr, RTC_PORT(0));
++	outb_p(val, RTC_PORT(1));
++	lock_cmos_suffix(addr);
 +}
++EXPORT_SYMBOL(rtc_cmos_write);
 +
 +/*
-+ * This does the hard work of actually picking apart the CPU stuff...
++ * This version of gettimeofday has microsecond resolution
++ * and better than microsecond precision on fast x86 machines with TSC.
 + */
-+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++void do_gettimeofday(struct timeval *tv)
 +{
-+	int i;
-+	u32 xlvl;
++	unsigned long seq;
++	unsigned long usec, sec;
++	unsigned long max_ntp_tick;
++	s64 nsec;
++	unsigned int cpu;
++	struct shadow_time_info *shadow;
++	u32 local_time_version;
 +
-+	early_identify_cpu(c);
++	cpu = get_cpu();
++	shadow = &per_cpu(shadow_time, cpu);
 +
-+	/* AMD-defined flags: level 0x80000001 */
-+	xlvl = cpuid_eax(0x80000000);
-+	c->extended_cpuid_level = xlvl;
-+	if ((xlvl & 0xffff0000) == 0x80000000) {
-+		if (xlvl >= 0x80000001) {
-+			c->x86_capability[1] = cpuid_edx(0x80000001);
-+			c->x86_capability[6] = cpuid_ecx(0x80000001);
-+		}
-+		if (xlvl >= 0x80000004)
-+			get_model_name(c); /* Default name */
-+	}
++	do {
++		local_time_version = shadow->version;
++		seq = read_seqbegin(&xtime_lock);
 +
-+	/* Transmeta-defined flags: level 0x80860001 */
-+	xlvl = cpuid_eax(0x80860000);
-+	if ((xlvl & 0xffff0000) == 0x80860000) {
-+		/* Don't set x86_cpuid_level here for now to not confuse. */
-+		if (xlvl >= 0x80860001)
-+			c->x86_capability[2] = cpuid_edx(0x80860001);
-+	}
++		usec = get_usec_offset(shadow);
 +
-+	c->apicid = phys_pkg_id(0);
++		/*
++		 * If time_adjust is negative then NTP is slowing the clock
++		 * so make sure not to go into next possible interval.
++		 * Better to lose some accuracy than have time go backwards..
++		 */
++		if (unlikely(time_adjust < 0)) {
++			max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
++			usec = min(usec, max_ntp_tick);
++		}
 +
-+	/*
-+	 * Vendor-specific initialization.  In this section we
-+	 * canonicalize the feature flags, meaning if there are
-+	 * features a certain CPU supports which CPUID doesn't
-+	 * tell us, CPUID claiming incorrect flags, or other bugs,
-+	 * we handle them here.
-+	 *
-+	 * At the end of this section, c->x86_capability better
-+	 * indicate the features this CPU genuinely supports!
-+	 */
-+	switch (c->x86_vendor) {
-+	case X86_VENDOR_AMD:
-+		init_amd(c);
-+		break;
++		sec = xtime.tv_sec;
++		usec += (xtime.tv_nsec / NSEC_PER_USEC);
 +
-+	case X86_VENDOR_INTEL:
-+		init_intel(c);
-+		break;
++		nsec = shadow->system_timestamp - processed_system_time;
++		__normalize_time(&sec, &nsec);
++		usec += (long)nsec / NSEC_PER_USEC;
 +
-+	case X86_VENDOR_UNKNOWN:
-+	default:
-+		display_cacheinfo(c);
-+		break;
-+	}
++		if (unlikely(!time_values_up_to_date(cpu))) {
++			/*
++			 * We may have blocked for a long time,
++			 * rendering our calculations invalid
++			 * (e.g. the time delta may have
++			 * overflowed). Detect that and recalculate
++			 * with fresh values.
++			 */
++			get_time_values_from_xen();
++			continue;
++		}
++	} while (read_seqretry(&xtime_lock, seq) ||
++		 (local_time_version != shadow->version));
 +
-+	select_idle_routine(c);
-+	detect_ht(c); 
++	put_cpu();
 +
-+	/*
-+	 * On SMP, boot_cpu_data holds the common feature set between
-+	 * all CPUs; so make sure that we indicate which features are
-+	 * common between the CPUs.  The first time this routine gets
-+	 * executed, c == &boot_cpu_data.
-+	 */
-+	if (c != &boot_cpu_data) {
-+		/* AND the already accumulated flags with these */
-+		for (i = 0 ; i < NCAPINTS ; i++)
-+			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++	while (usec >= USEC_PER_SEC) {
++		usec -= USEC_PER_SEC;
++		sec++;
 +	}
 +
-+#ifdef CONFIG_X86_MCE
-+	mcheck_init(c);
-+#endif
-+	if (c == &boot_cpu_data)
-+		mtrr_bp_init();
-+	else
-+		mtrr_ap_init();
-+#ifdef CONFIG_NUMA
-+	numa_add_cpu(smp_processor_id());
-+#endif
++	tv->tv_sec = sec;
++	tv->tv_usec = usec;
 +}
-+ 
 +
-+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
++EXPORT_SYMBOL(do_gettimeofday);
++
++int do_settimeofday(struct timespec *tv)
 +{
-+	if (c->x86_model_id[0])
-+		printk("%s", c->x86_model_id);
++	time_t sec;
++	s64 nsec;
++	unsigned int cpu;
++	struct shadow_time_info *shadow;
++	dom0_op_t op;
 +
-+	if (c->x86_mask || c->cpuid_level >= 0) 
-+		printk(" stepping %02x\n", c->x86_mask);
-+	else
-+		printk("\n");
-+}
++	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++		return -EINVAL;
 +
-+/*
-+ *	Get CPU information for use by the procfs.
-+ */
++	cpu = get_cpu();
++	shadow = &per_cpu(shadow_time, cpu);
 +
-+static int show_cpuinfo(struct seq_file *m, void *v)
-+{
-+	struct cpuinfo_x86 *c = v;
++	write_seqlock_irq(&xtime_lock);
 +
-+	/* 
-+	 * These flag bits must match the definitions in <asm/cpufeature.h>.
-+	 * NULL means this bit is undefined or reserved; either way it doesn't
-+	 * have meaning as far as Linux is concerned.  Note that it's important
-+	 * to realize there is a difference between this table and CPUID -- if
-+	 * applications want to get the raw CPUID data, they should access
-+	 * /dev/cpu/<cpu_nr>/cpuid instead.
++	/*
++	 * Ensure we are not blocked for so long that our time delta
++	 * overflows: if that happened, our shadow time values would be
++	 * stale, so the loop below retries with fresh ones.
 +	 */
-+	static char *x86_cap_flags[] = {
-+		/* Intel-defined */
-+	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
-+	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
-+	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
-+	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
++	for (;;) {
++		nsec = tv->tv_nsec - get_nsec_offset(shadow);
++		if (time_values_up_to_date(cpu))
++			break;
++		get_time_values_from_xen();
++	}
++	sec = tv->tv_sec;
++	__normalize_time(&sec, &nsec);
 +
-+		/* AMD-defined */
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
-+		NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
++	if (is_initial_xendomain() && !independent_wallclock) {
++		op.cmd = DOM0_SETTIME;
++		op.u.settime.secs        = sec;
++		op.u.settime.nsecs       = nsec;
++		op.u.settime.system_time = shadow->system_timestamp;
++		HYPERVISOR_dom0_op(&op);
++		update_wallclock();
++	} else if (independent_wallclock) {
++		nsec -= shadow->system_timestamp;
++		__normalize_time(&sec, &nsec);
++		__update_wallclock(sec, nsec);
++	}
 +
-+		/* Transmeta-defined */
-+		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++	write_sequnlock_irq(&xtime_lock);
 +
-+		/* Other (Linux-defined) */
-+		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
-+		"constant_tsc", NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		"up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++	put_cpu();
 +
-+		/* Intel-defined (#2) */
-+		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
-+		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++	clock_was_set();
++	return 0;
++}
 +
-+		/* VIA/Cyrix/Centaur-defined */
-+		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++EXPORT_SYMBOL(do_settimeofday);
 +
-+		/* AMD-defined (#2) */
-+		"lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+	};
-+	static char *x86_power_flags[] = { 
-+		"ts",	/* temperature sensor */
-+		"fid",  /* frequency id control */
-+		"vid",  /* voltage id control */
-+		"ttp",  /* thermal trip */
-+		"tm",
-+		"stc",
-+		NULL,
-+		/* nothing */	/* constant_tsc - moved to flags */
-+	};
++static void sync_xen_wallclock(unsigned long dummy);
++static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
++static void sync_xen_wallclock(unsigned long dummy)
++{
++	time_t sec;
++	s64 nsec;
++	dom0_op_t op;
 +
++	if (!ntp_synced() || independent_wallclock || !is_initial_xendomain())
++		return;
 +
-+#ifdef CONFIG_SMP
-+	if (!cpu_online(c-cpu_data))
-+		return 0;
-+#endif
++	write_seqlock_irq(&xtime_lock);
 +
-+	seq_printf(m,"processor\t: %u\n"
-+		     "vendor_id\t: %s\n"
-+		     "cpu family\t: %d\n"
-+		     "model\t\t: %d\n"
-+		     "model name\t: %s\n",
-+		     (unsigned)(c-cpu_data),
-+		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
-+		     c->x86,
-+		     (int)c->x86_model,
-+		     c->x86_model_id[0] ? c->x86_model_id : "unknown");
-+	
-+	if (c->x86_mask || c->cpuid_level >= 0)
-+		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
-+	else
-+		seq_printf(m, "stepping\t: unknown\n");
-+	
-+	if (cpu_has(c,X86_FEATURE_TSC)) {
-+		unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
-+		if (!freq)
-+			freq = cpu_khz;
-+		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
-+			     freq / 1000, (freq % 1000));
-+	}
++	sec  = xtime.tv_sec;
++	nsec = xtime.tv_nsec;
++	__normalize_time(&sec, &nsec);
 +
-+	/* Cache size */
-+	if (c->x86_cache_size >= 0) 
-+		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
-+	
-+#ifdef CONFIG_SMP
-+	if (smp_num_siblings * c->x86_max_cores > 1) {
-+		int cpu = c - cpu_data;
-+		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-+		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
-+		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
-+		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
-+	}
-+#endif	
++	op.cmd = DOM0_SETTIME;
++	op.u.settime.secs        = sec;
++	op.u.settime.nsecs       = nsec;
++	op.u.settime.system_time = processed_system_time;
++	HYPERVISOR_dom0_op(&op);
 +
-+	seq_printf(m,
-+	        "fpu\t\t: yes\n"
-+	        "fpu_exception\t: yes\n"
-+	        "cpuid level\t: %d\n"
-+	        "wp\t\t: yes\n"
-+	        "flags\t\t:",
-+		   c->cpuid_level);
++	update_wallclock();
 +
-+	{ 
-+		int i; 
-+		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
-+			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
-+				seq_printf(m, " %s", x86_cap_flags[i]);
-+	}
-+		
-+	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
-+		   c->loops_per_jiffy/(500000/HZ),
-+		   (c->loops_per_jiffy/(5000/HZ)) % 100);
++	write_sequnlock_irq(&xtime_lock);
 +
-+	if (c->x86_tlbsize > 0) 
-+		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
-+	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
-+	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
++	/* Once per minute. */
++	mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
++}
 +
-+	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", 
-+		   c->x86_phys_bits, c->x86_virt_bits);
++static int set_rtc_mmss(unsigned long nowtime)
++{
++	int retval;
++	unsigned long flags;
 +
-+	seq_printf(m, "power management:");
-+	{
-+		unsigned i;
-+		for (i = 0; i < 32; i++) 
-+			if (c->x86_power & (1 << i)) {
-+				if (i < ARRAY_SIZE(x86_power_flags) &&
-+					x86_power_flags[i])
-+					seq_printf(m, "%s%s",
-+						x86_power_flags[i][0]?" ":"",
-+						x86_power_flags[i]);
-+				else
-+					seq_printf(m, " [%d]", i);
-+			}
-+	}
++	if (independent_wallclock || !is_initial_xendomain())
++		return 0;
 +
-+	seq_printf(m, "\n\n");
++	/* gets recalled with irq locally disabled */
++	spin_lock_irqsave(&rtc_lock, flags);
++	if (efi_enabled)
++		retval = efi_set_rtc_mmss(nowtime);
++	else
++		retval = mach_set_rtc_mmss(nowtime);
++	spin_unlock_irqrestore(&rtc_lock, flags);
 +
-+	return 0;
++	return retval;
 +}
 +
-+static void *c_start(struct seq_file *m, loff_t *pos)
++/* monotonic_clock(): returns # of nanoseconds passed since time_init()
++ *		Note: This function is required to return accurate
++ *		time even in the absence of multiple timer ticks.
++ */
++unsigned long long monotonic_clock(void)
 +{
-+	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
-+}
++	int cpu = get_cpu();
++	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++	u64 time;
++	u32 local_time_version;
 +
-+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-+{
-+	++*pos;
-+	return c_start(m, pos);
++	do {
++		local_time_version = shadow->version;
++		barrier();
++		time = shadow->system_timestamp + get_nsec_offset(shadow);
++		if (!time_values_up_to_date(cpu))
++			get_time_values_from_xen();
++		barrier();
++	} while (local_time_version != shadow->version);
++
++	put_cpu();
++
++	return time;
 +}
++EXPORT_SYMBOL(monotonic_clock);
 +
-+static void c_stop(struct seq_file *m, void *v)
++unsigned long long sched_clock(void)
 +{
++	return monotonic_clock();
 +}
 +
-+struct seq_operations cpuinfo_op = {
-+	.start =c_start,
-+	.next =	c_next,
-+	.stop =	c_stop,
-+	.show =	show_cpuinfo,
-+};
-+
-+#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
-+#include <linux/platform_device.h>
-+static __init int add_pcspkr(void)
++unsigned long profile_pc(struct pt_regs *regs)
 +{
-+	struct platform_device *pd;
-+	int ret;
-+
-+	pd = platform_device_alloc("pcspkr", -1);
-+	if (!pd)
-+		return -ENOMEM;
-+
-+	ret = platform_device_add(pd);
-+	if (ret)
-+		platform_device_put(pd);
++	unsigned long pc = instruction_pointer(regs);
 +
-+	return ret;
++	/* Assume the lock function has either no stack frame or a copy
++	   of eflags from PUSHF.
++	   Eflags always has bits 22 and up cleared, unlike kernel addresses. */
++	if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++		unsigned long *sp = (unsigned long *)regs->rsp;
++		if (sp[0] >> 22)
++			return sp[0];
++		if (sp[1] >> 22)
++			return sp[1];
++	}
++	return pc;
 +}
-+device_initcall(add_pcspkr);
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/smp-xen.c linux-2.6.18-xen/arch/x86_64/kernel/smp-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/smp-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/smp-xen.c	2006-11-19 14:26:34.000000000 +0100
-@@ -0,0 +1,600 @@
-+/*
-+ *	Intel SMP support routines.
-+ *
-+ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
-+ *	(c) 1998-99, 2000 Ingo Molnar <mingo at redhat.com>
-+ *      (c) 2002,2003 Andi Kleen, SuSE Labs.
-+ *
-+ *	This code is released under the GNU General Public License version 2 or
-+ *	later.
-+ */
++EXPORT_SYMBOL(profile_pc);
 +
-+#include <linux/init.h>
++irqreturn_t timer_interrupt(int irq, void *dev_id)
++{
++	s64 delta, delta_cpu, stolen, blocked;
++	u64 sched_time;
++	int i, cpu = smp_processor_id();
++	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
 +
-+#include <linux/mm.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/smp_lock.h>
-+#include <linux/smp.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/interrupt.h>
++	write_seqlock(&xtime_lock);
 +
-+#include <asm/mtrr.h>
-+#include <asm/pgalloc.h>
-+#include <asm/tlbflush.h>
-+#include <asm/mach_apic.h>
-+#include <asm/mmu_context.h>
-+#include <asm/proto.h>
-+#include <asm/apicdef.h>
-+#include <asm/idle.h>
-+#ifdef CONFIG_XEN
-+#include <xen/evtchn.h>
-+#endif
++	do {
++		get_time_values_from_xen();
 +
-+#ifndef CONFIG_XEN
-+/*
-+ *	Smarter SMP flushing macros. 
-+ *		c/o Linus Torvalds.
-+ *
-+ *	These mean you can really definitely utterly forget about
-+ *	writing to user space from interrupts. (Its not allowed anyway).
-+ *
-+ *	Optimizations Manfred Spraul <manfred at colorfullife.com>
-+ *
-+ * 	More scalable flush, from Andi Kleen
-+ *
-+ * 	To avoid global state use 8 different call vectors.
-+ * 	Each CPU uses a specific vector to trigger flushes on other
-+ * 	CPUs. Depending on the received vector the target CPUs look into
-+ *	the right per cpu variable for the flush data.
-+ *
-+ * 	With more than 8 CPUs they are hashed to the 8 available
-+ * 	vectors. The limited global vector space forces us to this right now.
-+ *	In future when interrupts are split into per CPU domains this could be
-+ *	fixed, at the cost of triggering multiple IPIs in some cases.
-+ */
++		/* Obtain a consistent snapshot of elapsed wallclock cycles. */
++		delta = delta_cpu =
++			shadow->system_timestamp + get_nsec_offset(shadow);
++		delta     -= processed_system_time;
++		delta_cpu -= per_cpu(processed_system_time, cpu);
 +
-+union smp_flush_state {
-+	struct {
-+		cpumask_t flush_cpumask;
-+		struct mm_struct *flush_mm;
-+		unsigned long flush_va;
-+#define FLUSH_ALL	-1ULL
-+		spinlock_t tlbstate_lock;
-+	};
-+	char pad[SMP_CACHE_BYTES];
-+} ____cacheline_aligned;
++		/*
++		 * Obtain a consistent snapshot of stolen/blocked cycles. We
++		 * can use state_entry_time to detect if we get preempted here.
++		 */
++		do {
++			sched_time = runstate->state_entry_time;
++			barrier();
++			stolen = runstate->time[RUNSTATE_runnable] +
++				runstate->time[RUNSTATE_offline] -
++				per_cpu(processed_stolen_time, cpu);
++			blocked = runstate->time[RUNSTATE_blocked] -
++				per_cpu(processed_blocked_time, cpu);
++			barrier();
++		} while (sched_time != runstate->state_entry_time);
++	} while (!time_values_up_to_date(cpu));
 +
-+/* State is put into the per CPU data section, but padded
-+   to a full cache line because other CPUs can access it and we don't
-+   want false sharing in the per cpu data segment. */
-+static DEFINE_PER_CPU(union smp_flush_state, flush_state);
-+#endif
++	if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
++	     unlikely(delta_cpu < -(s64)permitted_clock_jitter))
++	    && printk_ratelimit()) {
++		printk("Timer ISR/%d: Time went backwards: "
++		       "delta=%lld delta_cpu=%lld shadow=%lld "
++		       "off=%lld processed=%lld cpu_processed=%lld\n",
++		       cpu, delta, delta_cpu, shadow->system_timestamp,
++		       (s64)get_nsec_offset(shadow),
++		       processed_system_time,
++		       per_cpu(processed_system_time, cpu));
++		for (i = 0; i < num_online_cpus(); i++)
++			printk(" %d: %lld\n", i,
++			       per_cpu(processed_system_time, i));
++	}
 +
-+/*
-+ * We cannot call mmdrop() because we are in interrupt context, 
-+ * instead update mm->cpu_vm_mask.
-+ */
-+static inline void leave_mm(unsigned long cpu)
-+{
-+	if (read_pda(mmu_state) == TLBSTATE_OK)
-+		BUG();
-+	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
-+	load_cr3(swapper_pg_dir);
-+}
++	/* System-wide jiffy work. */
++	while (delta >= NS_PER_TICK) {
++		delta -= NS_PER_TICK;
++		processed_system_time += NS_PER_TICK;
++		do_timer(1);
++	}
 +
-+#ifndef CONFIG_XEN
-+/*
-+ *
-+ * The flush IPI assumes that a thread switch happens in this order:
-+ * [cpu0: the cpu that switches]
-+ * 1) switch_mm() either 1a) or 1b)
-+ * 1a) thread switch to a different mm
-+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
-+ * 	Stop ipi delivery for the old mm. This is not synchronized with
-+ * 	the other cpus, but smp_invalidate_interrupt ignore flush ipis
-+ * 	for the wrong mm, and in the worst case we perform a superfluous
-+ * 	tlb flush.
-+ * 1a2) set cpu mmu_state to TLBSTATE_OK
-+ * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
-+ *	was in lazy tlb mode.
-+ * 1a3) update cpu active_mm
-+ * 	Now cpu0 accepts tlb flushes for the new mm.
-+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
-+ * 	Now the other cpus will send tlb flush ipis.
-+ * 1a4) change cr3.
-+ * 1b) thread switch without mm change
-+ *	cpu active_mm is correct, cpu0 already handles
-+ *	flush ipis.
-+ * 1b1) set cpu mmu_state to TLBSTATE_OK
-+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
-+ * 	Atomically set the bit [other cpus will start sending flush ipis],
-+ * 	and test the bit.
-+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
-+ * 2) switch %%esp, ie current
-+ *
-+ * The interrupt must handle 2 special cases:
-+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
-+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
-+ *   runs in kernel space, the cpu could load tlb entries for user space
-+ *   pages.
-+ *
-+ * The good news is that cpu mmu_state is local to each cpu, no
-+ * write/read ordering problems.
-+ */
++	if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
++		update_wallclock();
++		clock_was_set();
++	}
 +
-+/*
-+ * TLB flush IPI:
-+ *
-+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
-+ * 2) Leave the mm if we are in the lazy tlb mode.
-+ *
-+ * Interrupts are disabled.
-+ */
++	write_sequnlock(&xtime_lock);
 +
-+asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
-+{
-+	int cpu;
-+	int sender;
-+	union smp_flush_state *f;
++	/*
++	 * Account stolen ticks.
++	 * HACK: Passing NULL to account_steal_time()
++	 * ensures that the ticks are accounted as stolen.
++	 */
++	if ((stolen > 0) && (delta_cpu > 0)) {
++		delta_cpu -= stolen;
++		if (unlikely(delta_cpu < 0))
++			stolen += delta_cpu; /* clamp local-time progress */
++		do_div(stolen, NS_PER_TICK);
++		per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
++		per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
++		account_steal_time(NULL, (cputime_t)stolen);
++	}
 +
-+	cpu = smp_processor_id();
 +	/*
-+	 * orig_rax contains the interrupt vector - 256.
-+	 * Use that to determine where the sender put the data.
++	 * Account blocked ticks.
++	 * HACK: Passing idle_task to account_steal_time()
++	 * ensures that the ticks are accounted as idle/wait.
 +	 */
-+	sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
-+	f = &per_cpu(flush_state, sender);
++	if ((blocked > 0) && (delta_cpu > 0)) {
++		delta_cpu -= blocked;
++		if (unlikely(delta_cpu < 0))
++			blocked += delta_cpu; /* clamp local-time progress */
++		do_div(blocked, NS_PER_TICK);
++		per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
++		per_cpu(processed_system_time, cpu)  += blocked * NS_PER_TICK;
++		account_steal_time(idle_task(cpu), (cputime_t)blocked);
++	}
 +
-+	if (!cpu_isset(cpu, f->flush_cpumask))
-+		goto out;
-+		/* 
-+		 * This was a BUG() but until someone can quote me the
-+		 * line from the intel manual that guarantees an IPI to
-+		 * multiple CPUs is retried _only_ on the erroring CPUs
-+		 * its staying as a return
-+		 *
-+		 * BUG();
-+		 */
-+		 
-+	if (f->flush_mm == read_pda(active_mm)) {
-+		if (read_pda(mmu_state) == TLBSTATE_OK) {
-+			if (f->flush_va == FLUSH_ALL)
-+				local_flush_tlb();
-+			else
-+				__flush_tlb_one(f->flush_va);
-+		} else
-+			leave_mm(cpu);
++	/* Account user/system ticks. */
++	if (delta_cpu > 0) {
++		do_div(delta_cpu, NS_PER_TICK);
++		per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
++		if (user_mode(get_irq_regs()))
++			account_user_time(current, (cputime_t)delta_cpu);
++		else
++			account_system_time(current, HARDIRQ_OFFSET,
++					    (cputime_t)delta_cpu);
 +	}
-+out:
-+	ack_APIC_irq();
-+	cpu_clear(cpu, f->flush_cpumask);
++
++	/* Local timer processing (see update_process_times()). */
++	run_local_timers();
++	if (rcu_pending(cpu))
++		rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
++	scheduler_tick();
++	run_posix_cpu_timers(current);
++
++	return IRQ_HANDLED;
 +}
 +
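The accounting blocks in timer_interrupt() split the per-CPU elapsed time into stolen, blocked, and real ticks, clamping each bucket so the per-CPU clock never runs ahead of actual local progress. The stolen-time clamp in isolation, with illustrative values (NS_PER_TICK below assumes HZ=1000 purely for the example, not the patch's configuration):

    #include <assert.h>
    #include <stdint.h>

    #define NS_PER_TICK 1000000LL

    static int64_t account_stolen(int64_t *delta_cpu, int64_t stolen)
    {
        if (stolen <= 0 || *delta_cpu <= 0)
            return 0;
        *delta_cpu -= stolen;
        if (*delta_cpu < 0)
            stolen += *delta_cpu;  /* clamp to local-time progress */
        return stolen / NS_PER_TICK;  /* whole ticks, as do_div() yields */
    }

    int main(void)
    {
        /* 3 ticks of local progress, but 5 ticks reported stolen. */
        int64_t delta_cpu = 3 * NS_PER_TICK;
        int64_t ticks = account_stolen(&delta_cpu, 5 * NS_PER_TICK);

        assert(ticks == 3);                    /* clamped from 5 to 3 */
        assert(delta_cpu == -2 * NS_PER_TICK); /* no user/system ticks left */
        return 0;
    }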
-+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-+						unsigned long va)
++static void init_missing_ticks_accounting(int cpu)
 +{
-+	int sender;
-+	union smp_flush_state *f;
++	struct vcpu_register_runstate_memory_area area;
++	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
 +
-+	/* Caller has disabled preemption */
-+	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
-+	f = &per_cpu(flush_state, sender);
++	memset(runstate, 0, sizeof(*runstate));
 +
-+	/* Could avoid this lock when
-+	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-+	   probably not worth checking this for a cache-hot lock. */
-+	spin_lock(&f->tlbstate_lock);
++	area.addr.v = runstate;
++	HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
 +
-+	f->flush_mm = mm;
-+	f->flush_va = va;
-+	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
++	per_cpu(processed_blocked_time, cpu) =
++		runstate->time[RUNSTATE_blocked];
++	per_cpu(processed_stolen_time, cpu) =
++		runstate->time[RUNSTATE_runnable] +
++		runstate->time[RUNSTATE_offline];
++}
 +
-+	/*
-+	 * We have to send the IPI only to
-+	 * CPUs affected.
-+	 */
-+	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
++/* not static: needed by APM */
++unsigned long get_cmos_time(void)
++{
++	unsigned long retval;
++	unsigned long flags;
++
++	spin_lock_irqsave(&rtc_lock, flags);
++
++	if (efi_enabled)
++		retval = efi_get_time();
++	else
++		retval = mach_get_cmos_time();
 +
-+	while (!cpus_empty(f->flush_cpumask))
-+		cpu_relax();
++	spin_unlock_irqrestore(&rtc_lock, flags);
 +
-+	f->flush_mm = NULL;
-+	f->flush_va = 0;
-+	spin_unlock(&f->tlbstate_lock);
++	return retval;
 +}
++EXPORT_SYMBOL(get_cmos_time);
 +
-+int __cpuinit init_smp_flush(void)
-+{
-+	int i;
-+	for_each_cpu_mask(i, cpu_possible_map) {
-+		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
-+	}
-+	return 0;
-+}
++static void sync_cmos_clock(unsigned long dummy);
 +
-+core_initcall(init_smp_flush);
-+	
-+void flush_tlb_current_task(void)
++static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
++
++static void sync_cmos_clock(unsigned long dummy)
 +{
-+	struct mm_struct *mm = current->mm;
-+	cpumask_t cpu_mask;
++	struct timeval now, next;
++	int fail = 1;
 +
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
++	/*
++	 * If we have an externally synchronized Linux clock, then update
++	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
++	 * called as close as possible to 500 ms before the new second starts.
++	 * This code is run on a timer.  If the clock is set, that timer
++	 * may not expire at the correct time.  Thus, we adjust...
++	 */
++	if (!ntp_synced())
++		/*
++		 * Not synced, exit, do not restart a timer (if one is
++		 * running, let it run out).
++		 */
++		return;
 +
-+	local_flush_tlb();
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL(flush_tlb_current_task);
++	do_gettimeofday(&now);
++	if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
++	    now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
++		fail = set_rtc_mmss(now.tv_sec);
 +
-+void flush_tlb_mm (struct mm_struct * mm)
-+{
-+	cpumask_t cpu_mask;
++	next.tv_usec = USEC_AFTER - now.tv_usec;
++	if (next.tv_usec <= 0)
++		next.tv_usec += USEC_PER_SEC;
 +
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
++	if (!fail)
++		next.tv_sec = 659;
++	else
++		next.tv_sec = 0;
 +
-+	if (current->active_mm == mm) {
-+		if (current->mm)
-+			local_flush_tlb();
-+		else
-+			leave_mm(smp_processor_id());
++	if (next.tv_usec >= USEC_PER_SEC) {
++		next.tv_sec++;
++		next.tv_usec -= USEC_PER_SEC;
 +	}
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+
-+	preempt_enable();
++	mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
 +}
-+EXPORT_SYMBOL(flush_tlb_mm);
 +
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++void notify_arch_cmos_timer(void)
 +{
-+	struct mm_struct *mm = vma->vm_mm;
-+	cpumask_t cpu_mask;
-+
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
++	mod_timer(&sync_cmos_timer, jiffies + 1);
++	mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
++}
 +
-+	if (current->active_mm == mm) {
-+		if(current->mm)
-+			__flush_tlb_one(va);
-+		 else
-+		 	leave_mm(smp_processor_id());
-+	}
++static long clock_cmos_diff;
++static unsigned long sleep_start;
 +
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, va);
++static int timer_suspend(struct sys_device *dev, pm_message_t state)
++{
++	/*
++	 * Estimate time zone so that set_time can update the clock
++	 */
++	unsigned long ctime =  get_cmos_time();
 +
-+	preempt_enable();
++	clock_cmos_diff = -ctime;
++	clock_cmos_diff += get_seconds();
++	sleep_start = ctime;
++	return 0;
 +}
-+EXPORT_SYMBOL(flush_tlb_page);
 +
-+static void do_flush_tlb_all(void* info)
++static int timer_resume(struct sys_device *dev)
 +{
-+	unsigned long cpu = smp_processor_id();
++	unsigned long flags;
++	unsigned long sec;
++	unsigned long ctime = get_cmos_time();
++	long sleep_length = (ctime - sleep_start) * HZ;
 +
-+	__flush_tlb_all();
-+	if (read_pda(mmu_state) == TLBSTATE_LAZY)
-+		leave_mm(cpu);
-+}
++	if (sleep_length < 0) {
++		printk(KERN_WARNING "CMOS clock skew detected in timer resume!\n");
++		/* The time after the resume must not be earlier than the time
++		 * before the suspend or some nasty things will happen
++		 */
++		sleep_length = 0;
++		ctime = sleep_start;
++	}
 +
-+void flush_tlb_all(void)
-+{
-+	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++#ifdef CONFIG_HPET_TIMER
++	if (is_hpet_enabled())
++		hpet_reenable();
++#endif
++	sec = ctime + clock_cmos_diff;
++	write_seqlock_irqsave(&xtime_lock, flags);
++	xtime.tv_sec = sec;
++	xtime.tv_nsec = 0;
++	jiffies_64 += sleep_length;
++	write_sequnlock_irqrestore(&xtime_lock, flags);
++	touch_softlockup_watchdog();
++	return 0;
 +}
-+#else
-+asmlinkage void smp_invalidate_interrupt (void)
-+{ return; }
-+void flush_tlb_current_task(void)
-+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
-+void flush_tlb_mm (struct mm_struct * mm)
-+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
-+void flush_tlb_all(void)
-+{ xen_tlb_flush_all(); }
-+#endif /* Xen */
 +
-+/*
-+ * this function sends a 'reschedule' IPI to another CPU.
-+ * it goes straight through and wastes no time serializing
-+ * anything. Worst case is that we lose a reschedule ...
-+ */
++static struct sysdev_class timer_sysclass = {
++	.resume = timer_resume,
++	.suspend = timer_suspend,
++	set_kset_name("timer"),
++};
 +
-+void smp_send_reschedule(int cpu)
++
++/* XXX this driverfs stuff should probably go elsewhere later -john */
++static struct sys_device device_timer = {
++	.id	= 0,
++	.cls	= &timer_sysclass,
++};
++
++static int time_init_device(void)
 +{
-+	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++	int error = sysdev_class_register(&timer_sysclass);
++	if (!error)
++		error = sysdev_register(&device_timer);
++	return error;
 +}
 +
-+/*
-+ * Structure and data for smp_call_function(). This is designed to minimise
-+ * static memory requirements. It also looks cleaner.
-+ */
-+static DEFINE_SPINLOCK(call_lock);
++device_initcall(time_init_device);
 +
-+struct call_data_struct {
-+	void (*func) (void *info);
-+	void *info;
-+	atomic_t started;
-+	atomic_t finished;
-+	int wait;
-+};
++#ifdef CONFIG_HPET_TIMER
++extern void (*late_time_init)(void);
++/* Duplicate of time_init() below, with hpet_enable part added */
++static void __init hpet_time_init(void)
++{
++	xtime.tv_sec = get_cmos_time();
++	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
++	set_normalized_timespec(&wall_to_monotonic,
++		-xtime.tv_sec, -xtime.tv_nsec);
 +
-+static struct call_data_struct * call_data;
++	if ((hpet_enable() >= 0) && hpet_use_timer) {
++		printk("Using HPET for base-timer\n");
++	}
 +
-+void lock_ipi_call_lock(void)
-+{
-+	spin_lock_irq(&call_lock);
++	time_init_hook();
 +}
++#endif
 +
-+void unlock_ipi_call_lock(void)
++/* Dynamically-mapped IRQ. */
++DEFINE_PER_CPU(int, timer_irq);
++
++extern void (*late_time_init)(void);
++static void setup_cpu0_timer_irq(void)
 +{
-+	spin_unlock_irq(&call_lock);
++	per_cpu(timer_irq, 0) =
++		bind_virq_to_irqhandler(
++			VIRQ_TIMER,
++			0,
++			timer_interrupt,
++			SA_INTERRUPT,
++			"timer0",
++			NULL);
++	BUG_ON(per_cpu(timer_irq, 0) < 0);
 +}
 +
-+/*
-+ * this function sends a 'generic call function' IPI to one other CPU
-+ * in the system.
-+ *
-+ * cpu is a standard Linux logical CPU number.
-+ */
-+static void
-+__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-+				int nonatomic, int wait)
++void __init time_init(void)
 +{
-+	struct call_data_struct data;
-+	int cpus = 1;
++#ifdef CONFIG_HPET_TIMER
++	if (is_hpet_capable()) {
++		/*
++		 * HPET initialization needs to do memory-mapped io. So, let
++		 * us do a late initialization after mem_init().
++		 */
++		late_time_init = hpet_time_init;
++		return;
++	}
++#endif
++	get_time_values_from_xen();
 +
-+	data.func = func;
-+	data.info = info;
-+	atomic_set(&data.started, 0);
-+	data.wait = wait;
-+	if (wait)
-+		atomic_set(&data.finished, 0);
++	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++	per_cpu(processed_system_time, 0) = processed_system_time;
++	init_missing_ticks_accounting(0);
 +
-+	call_data = &data;
-+	wmb();
-+	/* Send a message to all other CPUs and wait for them to respond */
-+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
++	update_wallclock();
 +
-+	/* Wait for response */
-+	while (atomic_read(&data.started) != cpus)
-+		cpu_relax();
++	init_cpu_khz();
++	printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
++	       cpu_khz / 1000, cpu_khz % 1000);
 +
-+	if (!wait)
-+		return;
++	vxtime.mode = VXTIME_TSC;
++	vxtime.quot = (1000000L << US_SCALE) / vxtime_hz;
++	vxtime.tsc_quot = (1000L << US_SCALE) / cpu_khz;
++	sync_core();
++	rdtscll(vxtime.last_tsc);
 +
-+	while (atomic_read(&data.finished) != cpus)
-+		cpu_relax();
++	/* Cannot request_irq() until kmem is initialised. */
++	late_time_init = setup_cpu0_timer_irq;
 +}
 +
-+/*
-+ * smp_call_function_single - Run a function on another CPU
-+ * @func: The function to run. This must be fast and non-blocking.
-+ * @info: An arbitrary pointer to pass to the function.
-+ * @nonatomic: Currently unused.
-+ * @wait: If true, wait until function has completed on other CPUs.
-+ *
-+ * Retrurns 0 on success, else a negative status code.
-+ *
-+ * Does not return until the remote CPU is nearly ready to execute <func>
-+ * or is or has executed.
-+ */
-+
-+int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
-+	int nonatomic, int wait)
++/* Convert jiffies to system time. */
++u64 jiffies_to_st(unsigned long j)
 +{
-+	/* prevent preemption and reschedule on another processor */
-+	int me = get_cpu();
-+	if (cpu == me) {
-+		WARN_ON(1);
-+		put_cpu();
-+		return -EBUSY;
-+	}
-+	spin_lock_bh(&call_lock);
-+	__smp_call_function_single(cpu, func, info, nonatomic, wait);
-+	spin_unlock_bh(&call_lock);
-+	put_cpu();
-+	return 0;
++	unsigned long seq;
++	long delta;
++	u64 st;
++
++	do {
++		seq = read_seqbegin(&xtime_lock);
++		delta = j - jiffies;
++		if (delta < 1) {
++			/* Triggers in some wrap-around cases, but that's okay:
++			 * we just end up with a shorter timeout. */
++			st = processed_system_time + NS_PER_TICK;
++		} else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) {
++			/* Very long timeout means there is no pending timer.
++			 * We indicate this to Xen by passing zero timeout. */
++			st = 0;
++		} else {
++			st = processed_system_time + delta * (u64)NS_PER_TICK;
++		}
++	} while (read_seqretry(&xtime_lock, seq));
++
++	return st;
 +}
++EXPORT_SYMBOL(jiffies_to_st);
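jiffies_to_st() above is the bridge between Linux jiffies and Xen's nanosecond
system time: a caller converts an absolute jiffies deadline and hands the
result to HYPERVISOR_set_timer_op(), exactly as stop_hz_timer() does further
down. A minimal sketch of that pairing (the helper name is hypothetical; the
hypercall and BUG() handling mirror the patch, everything else is
illustration only):

	/* Sketch: arm a one-shot Xen timer ten ticks from now. A result
	 * of 0 from jiffies_to_st() means "no pending timer" and is
	 * passed through unchanged, as stop_hz_timer() also does. */
	static void arm_oneshot_in_ten_ticks(void)
	{
		u64 deadline = jiffies_to_st(jiffies + 10);

		if (HYPERVISOR_set_timer_op(deadline) != 0)
			BUG();
	}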
 +
 +/*
-+ * this function sends a 'generic call function' IPI to all other CPUs
-+ * in the system.
++ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
++ * These functions are based on implementations from arch/s390/kernel/time.c
 + */
-+static void __smp_call_function (void (*func) (void *info), void *info,
-+				int nonatomic, int wait)
++static void stop_hz_timer(void)
 +{
-+	struct call_data_struct data;
-+	int cpus = num_online_cpus()-1;
-+
-+	if (!cpus)
-+		return;
++	unsigned int cpu = smp_processor_id();
++	unsigned long j;
 +
-+	data.func = func;
-+	data.info = info;
-+	atomic_set(&data.started, 0);
-+	data.wait = wait;
-+	if (wait)
-+		atomic_set(&data.finished, 0);
++	cpu_set(cpu, nohz_cpu_mask);
 +
-+	call_data = &data;
-+	wmb();
-+	/* Send a message to all other CPUs and wait for them to respond */
-+	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++	/* See matching smp_mb in rcu_start_batch in rcupdate.c.  These mbs  */
++	/* ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a  */
++	/* value of rcp->cur that matches rdp->quiescbatch and allows us to  */
++	/* stop the hz timer then the cpumasks created for subsequent values */
++	/* of cur in rcu_start_batch are guaranteed to pick up the updated   */
++	/* nohz_cpu_mask and so will not depend on this cpu.                 */
 +
-+	/* Wait for response */
-+	while (atomic_read(&data.started) != cpus)
-+#ifndef CONFIG_XEN
-+		cpu_relax();
-+#else
-+		barrier();
-+#endif
++	smp_mb();
 +
-+	if (!wait)
-+		return;
++	/* Leave ourselves in tick mode if rcu or softirq or timer pending. */
++	if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
++	    (j = next_timer_interrupt(), time_before_eq(j, jiffies))) {
++		cpu_clear(cpu, nohz_cpu_mask);
++		j = jiffies + 1;
++	}
 +
-+	while (atomic_read(&data.finished) != cpus)
-+#ifndef CONFIG_XEN
-+		cpu_relax();
-+#else
-+		barrier();
-+#endif
++	if (HYPERVISOR_set_timer_op(jiffies_to_st(j)) != 0)
++		BUG();
 +}
 +
-+/*
-+ * smp_call_function - run a function on all other CPUs.
-+ * @func: The function to run. This must be fast and non-blocking.
-+ * @info: An arbitrary pointer to pass to the function.
-+ * @nonatomic: currently unused.
-+ * @wait: If true, wait (atomically) until function has completed on other
-+ *        CPUs.
-+ *
-+ * Returns 0 on success, else a negative status code. Does not return until
-+ * remote CPUs are nearly ready to execute func, or have already executed it.
-+ *
-+ * You must not call this function with disabled interrupts or from a
-+ * hardware interrupt handler or from a bottom half handler.
-+ * Actually there are a few legal cases, like panic.
-+ */
-+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-+			int wait)
++static void start_hz_timer(void)
 +{
-+	spin_lock(&call_lock);
-+	__smp_call_function(func,info,nonatomic,wait);
-+	spin_unlock(&call_lock);
-+	return 0;
++	cpu_clear(smp_processor_id(), nohz_cpu_mask);
 +}
-+EXPORT_SYMBOL(smp_call_function);
 +
-+void smp_stop_cpu(void)
++void raw_safe_halt(void)
 +{
-+	unsigned long flags;
-+	/*
-+	 * Remove this CPU:
-+	 */
-+	cpu_clear(smp_processor_id(), cpu_online_map);
-+	local_irq_save(flags);
-+#ifndef CONFIG_XEN
-+	disable_local_APIC();
-+#endif
-+	local_irq_restore(flags); 
++	stop_hz_timer();
++	/* Blocking includes an implicit local_irq_enable(). */
++	HYPERVISOR_block();
++	start_hz_timer();
 +}
++EXPORT_SYMBOL(raw_safe_halt);
 +
-+static void smp_really_stop_cpu(void *dummy)
-+{
-+	smp_stop_cpu(); 
-+	for (;;) 
-+		halt();
-+} 
-+
-+void smp_send_stop(void)
++void halt(void)
 +{
-+	int nolock = 0;
-+#ifndef CONFIG_XEN
-+	if (reboot_force)
-+		return;
-+#endif
-+	/* Don't deadlock on the call lock in panic */
-+	if (!spin_trylock(&call_lock)) {
-+		/* ignore locking because we have panicked anyways */
-+		nolock = 1;
-+	}
-+	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
-+	if (!nolock)
-+		spin_unlock(&call_lock);
-+
-+	local_irq_disable();
-+#ifndef CONFIG_XEN
-+	disable_local_APIC();
-+#endif
-+	local_irq_enable();
++	if (irqs_disabled())
++		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
 +}
++EXPORT_SYMBOL(halt);
 +
-+/*
-+ * Reschedule call back. Nothing to do,
-+ * all the work is done automatically when
-+ * we return from the interrupt.
-+ */
-+#ifndef CONFIG_XEN
-+asmlinkage void smp_reschedule_interrupt(void)
-+#else
-+asmlinkage irqreturn_t smp_reschedule_interrupt(void)
-+#endif
++/* No locking required. We are the only CPU running, and interrupts are off. */
++void time_resume(void)
 +{
-+#ifndef CONFIG_XEN
-+	ack_APIC_irq();
-+#else
-+	return IRQ_HANDLED;
-+#endif
-+}
++	init_cpu_khz();
 +
-+#ifndef CONFIG_XEN
-+asmlinkage void smp_call_function_interrupt(void)
-+#else
-+asmlinkage irqreturn_t smp_call_function_interrupt(void)
-+#endif
-+{
-+	void (*func) (void *info) = call_data->func;
-+	void *info = call_data->info;
-+	int wait = call_data->wait;
++	get_time_values_from_xen();
 +
-+#ifndef CONFIG_XEN
-+	ack_APIC_irq();
-+#endif
-+	/*
-+	 * Notify initiating CPU that I've grabbed the data and am
-+	 * about to execute the function
-+	 */
-+	mb();
-+	atomic_inc(&call_data->started);
-+	/*
-+	 * At this point the info structure may be out of scope unless wait==1
-+	 */
-+	exit_idle();
-+	irq_enter();
-+	(*func)(info);
-+	irq_exit();
-+	if (wait) {
-+		mb();
-+		atomic_inc(&call_data->finished);
-+	}
-+#ifdef CONFIG_XEN
-+	return IRQ_HANDLED;
-+#endif
++	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++	per_cpu(processed_system_time, 0) = processed_system_time;
++	init_missing_ticks_accounting(0);
++
++	update_wallclock();
 +}
 +
-+int safe_smp_processor_id(void)
-+{
-+#ifdef CONFIG_XEN
-+	return smp_processor_id();
-+#else
-+	unsigned apicid, i;
++#ifdef CONFIG_SMP
++static char timer_name[NR_CPUS][15];
 +
-+	if (disable_apic)
-+		return 0;
++void local_setup_timer(unsigned int cpu)
++{
++	int seq;
 +
-+	apicid = hard_smp_processor_id();
-+	if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
-+		return apicid;
++	BUG_ON(cpu == 0);
 +
-+	for (i = 0; i < NR_CPUS; ++i) {
-+		if (x86_cpu_to_apicid[i] == apicid)
-+			return i;
-+	}
++	do {
++		seq = read_seqbegin(&xtime_lock);
++		/* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
++		per_cpu(processed_system_time, cpu) =
++			per_cpu(shadow_time, 0).system_timestamp;
++		init_missing_ticks_accounting(cpu);
++	} while (read_seqretry(&xtime_lock, seq));
 +
-+	/* No entries in x86_cpu_to_apicid?  Either no MPS|ACPI,
-+	 * or called too early.  Either way, we must be CPU 0. */
-+      	if (x86_cpu_to_apicid[0] == BAD_APICID)
-+		return 0;
++	sprintf(timer_name[cpu], "timer%d", cpu);
++	per_cpu(timer_irq, cpu) =
++		bind_virq_to_irqhandler(
++			VIRQ_TIMER,
++			cpu,
++			timer_interrupt,
++			SA_INTERRUPT,
++			timer_name[cpu],
++			NULL);
++	BUG_ON(per_cpu(timer_irq, cpu) < 0);
++}
 +
-+	return 0; /* Should not happen */
++void local_teardown_timer(unsigned int cpu)
++{
++	BUG_ON(cpu == 0);
++	unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
++}
 +#endif
++
++/*
++ * /proc/sys/xen: This really belongs in another file. It can stay here for
++ * now however.
++ */
++static ctl_table xen_subtable[] = {
++	{
++		.ctl_name	= 1,
++		.procname	= "independent_wallclock",
++		.data		= &independent_wallclock,
++		.maxlen		= sizeof(independent_wallclock),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec
++	},
++	{
++		.ctl_name	= 2,
++		.procname	= "permitted_clock_jitter",
++		.data		= &permitted_clock_jitter,
++		.maxlen		= sizeof(permitted_clock_jitter),
++		.mode		= 0644,
++		.proc_handler	= proc_doulongvec_minmax
++	},
++	{ 0 }
++};
++static ctl_table xen_table[] = {
++	{
++		.ctl_name	= 123,
++		.procname	= "xen",
++		.mode		= 0555,
++		.child		= xen_subtable},
++	{ 0 }
++};
++static int __init xen_sysctl_init(void)
++{
++	(void)register_sysctl_table(xen_table, 0);
++	return 0;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/traps-xen.c linux-2.6.18-xen/arch/x86_64/kernel/traps-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/traps-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/traps-xen.c	2006-11-19 14:26:34.000000000 +0100
-@@ -0,0 +1,1172 @@
++__initcall(xen_sysctl_init);
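The two sysctls registered above surface as /proc/sys/xen/independent_wallclock
and /proc/sys/xen/permitted_clock_jitter on a kernel carrying this patch. A
hedged userspace illustration (assumes such a kernel is running; otherwise the
fopen() simply fails and nothing is printed):

	/* Illustration only: read the integer sysctl registered above. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/xen/independent_wallclock", "r");
		int v;

		if (f && fscanf(f, "%d", &v) == 1)
			printf("independent_wallclock = %d\n", v);
		if (f)
			fclose(f);
		return 0;
	}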
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/traps-xen.c b/arch/x86_64/kernel/traps-xen.c
+--- a/arch/x86_64/kernel/traps-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/traps-xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,1156 @@
 +/*
 + *  linux/arch/x86-64/traps.c
 + *
@@ -39975,15 +35334,17 @@
 +#include <linux/delay.h>
 +#include <linux/spinlock.h>
 +#include <linux/interrupt.h>
++#include <linux/kallsyms.h>
 +#include <linux/module.h>
 +#include <linux/moduleparam.h>
 +#include <linux/nmi.h>
 +#include <linux/kprobes.h>
 +#include <linux/kexec.h>
 +#include <linux/unwind.h>
++#include <linux/uaccess.h>
++#include <linux/bug.h>
 +
 +#include <asm/system.h>
-+#include <asm/uaccess.h>
 +#include <asm/io.h>
 +#include <asm/atomic.h>
 +#include <asm/debugreg.h>
@@ -39997,6 +35358,7 @@
 +#include <asm/pda.h>
 +#include <asm/proto.h>
 +#include <asm/nmi.h>
++#include <asm/stacktrace.h>
 +
 +asmlinkage void divide_error(void);
 +asmlinkage void debug(void);
@@ -40021,6 +35383,7 @@
 +asmlinkage void spurious_interrupt_bug(void);
 +
 +ATOMIC_NOTIFIER_HEAD(die_chain);
++EXPORT_SYMBOL(die_chain);
 +
 +int register_die_notifier(struct notifier_block *nb)
 +{
@@ -40057,15 +35420,9 @@
 +	preempt_enable_no_resched();
 +}
 +
-+static int kstack_depth_to_print = 10;
-+#ifdef CONFIG_STACK_UNWIND
-+static int call_trace = 1;
-+#else
-+#define call_trace (-1)
-+#endif
++int kstack_depth_to_print = 12;
 +
 +#ifdef CONFIG_KALLSYMS
-+# include <linux/kallsyms.h>
 +void printk_address(unsigned long address)
 +{
 +	unsigned long offset = 0, symsize;
@@ -40093,7 +35450,7 @@
 +#endif
 +
 +static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
-+					unsigned *usedp, const char **idp)
++					unsigned *usedp, char **idp)
 +{
 +#ifndef CONFIG_X86_NO_TSS
 +	static char ids[][8] = {
@@ -40113,26 +35470,7 @@
 +	 * 'stack' is in one of them:
 +	 */
 +	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
-+		unsigned long end;
-+
-+		/*
-+		 * set 'end' to the end of the exception stack.
-+		 */
-+		switch (k + 1) {
-+		/*
-+		 * TODO: this block is not needed i think, because
-+		 * setup64.c:cpu_init() sets up t->ist[DEBUG_STACK]
-+		 * properly too.
-+		 */
-+#if DEBUG_STKSZ > EXCEPTION_STKSZ
-+		case DEBUG_STACK:
-+			end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
-+			break;
-+#endif
-+		default:
-+			end = per_cpu(orig_ist, cpu).ist[k];
-+			break;
-+		}
++		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
 +		/*
 +		 * Is 'stack' above this exception frame's end?
 +		 * If yes then skip to the next frame.
@@ -40187,18 +35525,7 @@
 +	return NULL;
 +}
 +
-+static int show_trace_unwind(struct unwind_frame_info *info, void *context)
-+{
-+	int n = 0;
-+
-+	while (unwind(info) == 0 && UNW_PC(info)) {
-+		if (arch_unw_user_mode(info))
-+			break;
-+		n++;
-+		printk_address(UNW_PC(info));
-+	}
-+	return n;
-+}
++#define MSG(txt) ops->warning(data, txt)
 +
 +/*
 + * x86-64 can have up to three kernel stacks: 
@@ -40207,55 +35534,42 @@
 + * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 + */
 +
-+void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
++static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
 +{
-+	const unsigned cpu = safe_smp_processor_id();
-+	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
-+	unsigned used = 0;
++	void *t = (void *)tinfo;
++	return p > t && p < t + THREAD_SIZE - 3;
++}
 +
-+	printk("\nCall Trace:\n");
++void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
++		unsigned long *stack,
++		struct stacktrace_ops *ops, void *data)
++{
++	const unsigned cpu = get_cpu();
++	unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
++	unsigned used = 0;
++	struct thread_info *tinfo;
 +
 +	if (!tsk)
 +		tsk = current;
 +
-+	if (call_trace >= 0) {
-+		int unw_ret = 0;
-+		struct unwind_frame_info info;
-+
-+		if (regs) {
-+			if (unwind_init_frame_info(&info, tsk, regs) == 0)
-+				unw_ret = show_trace_unwind(&info, NULL);
-+		} else if (tsk == current)
-+			unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
-+		else {
-+			if (unwind_init_blocked(&info, tsk) == 0)
-+				unw_ret = show_trace_unwind(&info, NULL);
-+		}
-+		if (unw_ret > 0) {
-+			if (call_trace == 1 && !arch_unw_user_mode(&info)) {
-+				print_symbol("DWARF2 unwinder stuck at %s\n",
-+					     UNW_PC(&info));
-+				if ((long)UNW_SP(&info) < 0) {
-+					printk("Leftover inexact backtrace:\n");
-+					stack = (unsigned long *)UNW_SP(&info);
-+				} else
-+					printk("Full inexact backtrace again:\n");
-+			} else if (call_trace >= 1)
-+				return;
-+			else
-+				printk("Full inexact backtrace again:\n");
-+		} else
-+			printk("Inexact backtrace:\n");
++	if (!stack) {
++		unsigned long dummy;
++		stack = &dummy;
++		if (tsk && tsk != current)
++			stack = (unsigned long *)tsk->thread.rsp;
 +	}
-+ 	/*
-+ 	 * Print function call entries within a stack. 'cond' is the
-+ 	 * "end of stackframe" condition, that the 'stack++'
-+ 	 * iteration will eventually trigger.
-+ 	 */
++
++	/*
++	 * Print function call entries within a stack. 'cond' is the
++	 * "end of stackframe" condition, that the 'stack++'
++	 * iteration will eventually trigger.
++	 */
 +#define HANDLE_STACK(cond) \
 +	do while (cond) { \
 +		unsigned long addr = *stack++; \
-+		if (kernel_text_address(addr)) { \
++		/* Use unlocked access here because except for NMIs	\
++		   we should be already protected against module unloads */ \
++		if (__kernel_text_address(addr)) { \
 +			/* \
 +			 * If the address is either in the text segment of the \
 +			 * kernel, or in the region which contains vmalloc'ed \
@@ -40264,7 +35578,7 @@
 +			 * down the cause of the crash will be able to figure \
 +			 * out the call path that was taken. \
 +			 */ \
-+			printk_address(addr); \
++			ops->address(data, addr);   \
 +		} \
 +	} while (0)
 +
@@ -40273,16 +35587,17 @@
 +	 * current stack address. If the stacks consist of nested
 +	 * exceptions
 +	 */
-+	for ( ; ; ) {
-+		const char *id;
++	for (;;) {
++		char *id;
 +		unsigned long *estack_end;
 +		estack_end = in_exception_stack(cpu, (unsigned long)stack,
 +						&used, &id);
 +
 +		if (estack_end) {
-+			printk(" <%s>", id);
++			if (ops->stack(data, id) < 0)
++				break;
 +			HANDLE_STACK (stack < estack_end);
-+			printk(" <EOE>");
++			ops->stack(data, "<EOE>");
 +			/*
 +			 * We link to the next stack via the
 +			 * second-to-last pointer (index -2 to end) in the
@@ -40297,7 +35612,8 @@
 +				(IRQSTACKSIZE - 64) / sizeof(*irqstack);
 +
 +			if (stack >= irqstack && stack < irqstack_end) {
-+				printk(" <IRQ>");
++				if (ops->stack(data, "IRQ") < 0)
++					break;
 +				HANDLE_STACK (stack < irqstack_end);
 +				/*
 +				 * We link to the next stack (which would be
@@ -40306,7 +35622,7 @@
 +				 */
 +				stack = (unsigned long *) (irqstack_end[-1]);
 +				irqstack_end = NULL;
-+				printk(" <EOI>");
++				ops->stack(data, "EOI");
 +				continue;
 +			}
 +		}
@@ -40314,19 +35630,59 @@
 +	}
 +
 +	/*
-+	 * This prints the process stack:
++	 * This handles the process stack:
 +	 */
-+	HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
++	tinfo = task_thread_info(tsk);
++	HANDLE_STACK (valid_stack_ptr(tinfo, stack));
 +#undef HANDLE_STACK
++	put_cpu();
++}
++EXPORT_SYMBOL(dump_trace);
++
++static void
++print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
++{
++	print_symbol(msg, symbol);
++	printk("\n");
++}
++
++static void print_trace_warning(void *data, char *msg)
++{
++	printk("%s\n", msg);
++}
++
++static int print_trace_stack(void *data, char *name)
++{
++	printk(" <%s> ", name);
++	return 0;
++}
++
++static void print_trace_address(void *data, unsigned long addr)
++{
++	printk_address(addr);
++}
++
++static struct stacktrace_ops print_trace_ops = {
++	.warning = print_trace_warning,
++	.warning_symbol = print_trace_warning_symbol,
++	.stack = print_trace_stack,
++	.address = print_trace_address,
++};
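print_trace_ops is just one consumer of the dump_trace()/stacktrace_ops split
that this version of the patch picks up: the stack walker no longer printk()s
directly but calls back into whatever ops the caller supplies. A sketch of a
second, hypothetical consumer that counts frames instead of printing them
(same callback signatures as above; not part of the patch):

	/* Sketch: count kernel-text return addresses via dump_trace(). */
	static void count_address(void *data, unsigned long addr)
	{
		(*(unsigned long *)data)++;
	}

	static int count_stack(void *data, char *name)
	{
		return 0;	/* keep walking across stack boundaries */
	}

	static void count_warning(void *data, char *msg) { }
	static void count_warning_symbol(void *data, char *msg, unsigned long sym) { }

	static struct stacktrace_ops count_ops = {
		.warning	= count_warning,
		.warning_symbol	= count_warning_symbol,
		.stack		= count_stack,
		.address	= count_address,
	};

	/* usage: unsigned long n = 0;
	 *        dump_trace(current, NULL, NULL, &count_ops, &n); */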
 +
++void
++show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
++{
++	printk("\nCall Trace:\n");
++	dump_trace(tsk, regs, stack, &print_trace_ops, NULL);
 +	printk("\n");
 +}
 +
-+static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
++static void
++_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
 +{
 +	unsigned long *stack;
 +	int i;
-+	const int cpu = safe_smp_processor_id();
++	const int cpu = smp_processor_id();
 +	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
 +	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
 +
@@ -40353,7 +35709,7 @@
 +		}
 +		if (i && ((i % 4) == 0))
 +			printk("\n");
-+		printk(" %016lx ", *stack++);
++		printk(" %016lx", *stack++);
 +		touch_nmi_watchdog();
 +	}
 +	show_trace(tsk, regs, rsp);
@@ -40380,7 +35736,7 @@
 +	int i;
 +	int in_kernel = !user_mode(regs);
 +	unsigned long rsp;
-+	const int cpu = safe_smp_processor_id(); 
++	const int cpu = smp_processor_id();
 +	struct task_struct *cur = cpu_pda(cpu)->pcurrent;
 +
 +		rsp = regs->rsp;
@@ -40416,36 +35772,22 @@
 +	printk("\n");
 +}	
 +
-+void handle_BUG(struct pt_regs *regs)
-+{ 
-+	struct bug_frame f;
-+	long len;
-+	const char *prefix = "";
++int is_valid_bugaddr(unsigned long rip)
++{
++	unsigned short ud2;
 +
-+	if (user_mode(regs))
-+		return; 
-+	if (__copy_from_user(&f, (const void __user *) regs->rip,
-+			     sizeof(struct bug_frame)))
-+		return; 
-+	if (f.filename >= 0 ||
-+	    f.ud2[0] != 0x0f || f.ud2[1] != 0x0b) 
-+		return;
-+	len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
-+	if (len < 0 || len >= PATH_MAX)
-+		f.filename = (int)(long)"unmapped filename";
-+	else if (len > 50) {
-+		f.filename += len - 50;
-+		prefix = "...";
-+	}
-+	printk("----------- [cut here ] --------- [please bite here ] ---------\n");
-+	printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
-+} 
++	if (__copy_from_user(&ud2, (const void __user *) rip, sizeof(ud2)))
++		return 0;
++
++	return ud2 == 0x0b0f;
++}
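The constant in is_valid_bugaddr() looks byte-swapped relative to the ud2
opcode because x86 is little-endian: the instruction encodes as the byte
sequence 0x0f 0x0b, and loading those two bytes into a u16 yields 0x0b0f. A
standalone illustration of just that point (plain userspace C, not kernel
code):

	#include <assert.h>
	#include <string.h>

	static void show_ud2_encoding(void)
	{
		unsigned char ud2_bytes[2] = { 0x0f, 0x0b };	/* ud2 */
		unsigned short v;

		memcpy(&v, ud2_bytes, sizeof(v));
		assert(v == 0x0b0f);	/* little-endian 16-bit load */
	}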
 +
 +#ifdef CONFIG_BUG
 +void out_of_line_bug(void)
 +{ 
 +	BUG(); 
 +} 
++EXPORT_SYMBOL(out_of_line_bug);
 +#endif
 +
 +static DEFINE_SPINLOCK(die_lock);
@@ -40454,9 +35796,11 @@
 +
 +unsigned __kprobes long oops_begin(void)
 +{
-+	int cpu = safe_smp_processor_id();
++	int cpu = smp_processor_id();
 +	unsigned long flags;
 +
++	oops_enter();
++
 +	/* racy, but better than risking deadlock. */
 +	local_irq_save(flags);
 +	if (!spin_trylock(&die_lock)) { 
@@ -40485,6 +35829,7 @@
 +		spin_unlock_irqrestore(&die_lock, flags);
 +	if (panic_on_oops)
 +		panic("Fatal exception");
++	oops_exit();
 +}
 +
 +void __kprobes __die(const char * str, struct pt_regs * regs, long err)
@@ -40515,14 +35860,15 @@
 +{
 +	unsigned long flags = oops_begin();
 +
-+	handle_BUG(regs);
++	if (!user_mode(regs))
++		report_bug(regs->rip);
++
 +	__die(str, regs, err);
 +	oops_end(flags);
 +	do_exit(SIGSEGV); 
 +}
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+void __kprobes die_nmi(char *str, struct pt_regs *regs)
++void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
 +{
 +	unsigned long flags = oops_begin();
 +
@@ -40530,19 +35876,17 @@
 +	 * We are in trouble anyway, lets at least try
 +	 * to get a message out.
 +	 */
-+	printk(str, safe_smp_processor_id());
++	printk(str, smp_processor_id());
 +	show_registers(regs);
 +	if (kexec_should_crash(current))
 +		crash_kexec(regs);
-+	if (panic_on_timeout || panic_on_oops)
-+		panic("nmi watchdog");
-+	printk("console shuts up ...\n");
++	if (do_panic || panic_on_oops)
++		panic("Non maskable interrupt");
 +	oops_end(flags);
 +	nmi_exit();
 +	local_irq_enable();
 +	do_exit(SIGSEGV);
 +}
-+#endif
 +
 +static void __kprobes do_trap(int trapnr, int signr, char *str,
 +			      struct pt_regs * regs, long error_code,
@@ -40683,8 +36027,14 @@
 +static __kprobes void
 +mem_parity_error(unsigned char reason, struct pt_regs * regs)
 +{
-+	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
-+	printk("You probably have a hardware problem with your RAM chips\n");
++	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
++		reason);
++	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
++
++	if (panic_on_unrecovered_nmi)
++		panic("NMI: Not continuing");
++
++	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
 +
 +#if 0 /* XEN */
 +	/* Clear and disable the memory parity error line. */
@@ -40711,9 +36061,15 @@
 +
 +static __kprobes void
 +unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-+{	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
-+	printk("Dazed and confused, but trying to continue\n");
-+	printk("Do you have a strange power saving mode enabled?\n");
++{
++	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
++		reason);
++	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
++
++	if (panic_on_unrecovered_nmi)
++		panic("NMI: Not continuing");
++
++	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
 +}
 +
 +/* Runs on IST stack. This code must keep interrupts off all the time.
@@ -40733,17 +36089,15 @@
 +		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
 +								== NOTIFY_STOP)
 +			return;
-+#ifdef CONFIG_X86_LOCAL_APIC
 +		/*
 +		 * Ok, so this is none of the documented NMI sources,
 +		 * so it must be the NMI watchdog.
 +		 */
-+		if (nmi_watchdog > 0) {
-+			nmi_watchdog_tick(regs,reason);
++		if (nmi_watchdog_tick(regs,reason))
 +			return;
-+		}
-+#endif
-+		unknown_nmi_error(reason, regs);
++		if (!do_nmi_callback(regs,cpu))
++			unknown_nmi_error(reason, regs);
++
 +		return;
 +	}
 +	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
@@ -41030,6 +36384,7 @@
 +		init_fpu(me);
 +	restore_fpu_checking(&me->thread.i387.fxsave);
 +	task_thread_info(me)->status |= TS_USEDFPU;
++	me->fpu_counter++;
 +}
 +
 +
@@ -41092,334 +36447,57 @@
 +}
 +
 +
-+/* Actual parsing is done early in setup.c. */
-+static int __init oops_dummy(char *s)
++static int __init oops_setup(char *s)
 +{ 
-+	panic_on_oops = 1;
-+	return 1;
++	if (!s)
++		return -EINVAL;
++	if (!strcmp(s, "panic"))
++		panic_on_oops = 1;
++	return 0;
 +} 
-+__setup("oops=", oops_dummy); 
++early_param("oops", oops_setup);
 +
 +static int __init kstack_setup(char *s)
 +{
++	if (!s)
++		return -EINVAL;
 +	kstack_depth_to_print = simple_strtoul(s,NULL,0);
-+	return 1;
-+}
-+__setup("kstack=", kstack_setup);
-+
-+#ifdef CONFIG_STACK_UNWIND
-+static int __init call_trace_setup(char *s)
-+{
-+	if (strcmp(s, "old") == 0)
-+		call_trace = -1;
-+	else if (strcmp(s, "both") == 0)
-+		call_trace = 0;
-+	else if (strcmp(s, "newfallback") == 0)
-+		call_trace = 1;
-+ 	else if (strcmp(s, "new") == 0)
-+		call_trace = 2;
-+	return 1;
-+}
-+__setup("call_trace=", call_trace_setup);
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/vmlinux.lds.S linux-2.6.18-xen/arch/x86_64/kernel/vmlinux.lds.S
---- linux-2.6.18.3/arch/x86_64/kernel/vmlinux.lds.S	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/x86_64/kernel/vmlinux.lds.S	2006-11-19 14:26:34.000000000 +0100
-@@ -13,6 +13,12 @@
- OUTPUT_ARCH(i386:x86-64)
- ENTRY(phys_startup_64)
- jiffies_64 = jiffies;
-+PHDRS {
-+	text PT_LOAD FLAGS(5);	/* R_E */
-+	data PT_LOAD FLAGS(7);	/* RWE */
-+	user PT_LOAD FLAGS(7);	/* RWE */
-+	note PT_NOTE FLAGS(4);	/* R__ */
++	return 0;
 +}
- SECTIONS
- {
-   . = __START_KERNEL;
-@@ -31,7 +37,7 @@
- 	KPROBES_TEXT
- 	*(.fixup)
- 	*(.gnu.warning)
--	} = 0x9090
-+	} :text = 0x9090
-   				/* out-of-line lock text */
-   .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }
- 
-@@ -57,17 +63,10 @@
-   .data : AT(ADDR(.data) - LOAD_OFFSET) {
- 	*(.data)
- 	CONSTRUCTORS
--	}
-+	} :data
- 
-   _edata = .;			/* End of data section */
- 
--  __bss_start = .;		/* BSS */
--  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
--	*(.bss.page_aligned)	
--	*(.bss)
--	}
--  __bss_stop = .;
--
-   . = ALIGN(PAGE_SIZE);
-   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
-   .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
-@@ -89,7 +88,7 @@
- #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
- 
-   . = VSYSCALL_ADDR;
--  .vsyscall_0 :	 AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) }
-+  .vsyscall_0 :	 AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
-   __vsyscall_0 = VSYSCALL_VIRT_ADDR;
- 
-   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
-@@ -132,7 +131,7 @@
-   . = ALIGN(8192);		/* init_task */
-   .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
- 	*(.data.init_task)
--  }
-+  } :data
- 
-   . = ALIGN(4096);
-   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
-@@ -222,6 +221,14 @@
-   . = ALIGN(4096);
-   __nosave_end = .;
- 
-+  __bss_start = .;		/* BSS */
-+  . = ALIGN(4096);
-+  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
-+	*(.bss.page_aligned)
-+	*(.bss)
-+	}
-+  __bss_stop = .;
-+
-   _end = . ;
- 
-   /* Sections to be discarded */
-@@ -235,4 +242,6 @@
++early_param("kstack", kstack_setup);
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
+--- a/arch/x86_64/kernel/vmlinux.lds.S	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/kernel/vmlinux.lds.S	2007-03-14 10:55:21.000000000 +0100
+@@ -227,4 +227,6 @@
    STABS_DEBUG
  
    DWARF_DEBUG
 +
 +  NOTES
  }
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/vsyscall-xen.c linux-2.6.18-xen/arch/x86_64/kernel/vsyscall-xen.c
---- linux-2.6.18.3/arch/x86_64/kernel/vsyscall-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/vsyscall-xen.c	2006-11-19 14:26:34.000000000 +0100
-@@ -0,0 +1,239 @@
-+/*
-+ *  linux/arch/x86_64/kernel/vsyscall.c
-+ *
-+ *  Copyright (C) 2001 Andrea Arcangeli <andrea at suse.de> SuSE
-+ *  Copyright 2003 Andi Kleen, SuSE Labs.
-+ *
-+ *  Thanks to hpa at transmeta.com for some useful hint.
-+ *  Special thanks to Ingo Molnar for his early experience with
-+ *  a different vsyscall implementation for Linux/IA32 and for the name.
-+ *
-+ *  vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
-+ *  at virtual address -10Mbyte+1024bytes etc... There are at max 4
-+ *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
-+ *  jumping out of line if necessary. We cannot add more with this
-+ *  mechanism because older kernels won't return -ENOSYS.
-+ *  If we want more than four we need a vDSO.
-+ *
-+ *  Note: the concept clashes with user mode linux. If you use UML and
-+ *  want per guest time just set the kernel.vsyscall64 sysctl to 0.
-+ */
-+
-+#include <linux/time.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/timer.h>
-+#include <linux/seqlock.h>
-+#include <linux/jiffies.h>
-+#include <linux/sysctl.h>
-+
-+#include <asm/vsyscall.h>
-+#include <asm/pgtable.h>
-+#include <asm/page.h>
-+#include <asm/fixmap.h>
-+#include <asm/errno.h>
-+#include <asm/io.h>
-+
-+#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
-+
-+int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
-+seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
-+
-+#include <asm/unistd.h>
-+
-+static __always_inline void timeval_normalize(struct timeval * tv)
-+{
-+	time_t __sec;
-+
-+	__sec = tv->tv_usec / 1000000;
-+	if (__sec) {
-+		tv->tv_usec %= 1000000;
-+		tv->tv_sec += __sec;
-+	}
-+}
-+
-+static __always_inline void do_vgettimeofday(struct timeval * tv)
-+{
-+	long sequence, t;
-+	unsigned long sec, usec;
-+
-+	do {
-+		sequence = read_seqbegin(&__xtime_lock);
-+		
-+		sec = __xtime.tv_sec;
-+		usec = (__xtime.tv_nsec / 1000) +
-+			(__jiffies - __wall_jiffies) * (1000000 / HZ);
-+
-+		if (__vxtime.mode != VXTIME_HPET) {
-+			t = get_cycles_sync();
-+			if (t < __vxtime.last_tsc)
-+				t = __vxtime.last_tsc;
-+			usec += ((t - __vxtime.last_tsc) *
-+				 __vxtime.tsc_quot) >> 32;
-+			/* See comment in x86_64 do_gettimeofday. */
-+		} else {
-+			usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
-+				  __vxtime.last) * __vxtime.quot) >> 32;
-+		}
-+	} while (read_seqretry(&__xtime_lock, sequence));
-+
-+	tv->tv_sec = sec + usec / 1000000;
-+	tv->tv_usec = usec % 1000000;
-+}
-+
-+/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
-+static __always_inline void do_get_tz(struct timezone * tz)
-+{
-+	*tz = __sys_tz;
-+}
-+
-+static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
-+{
-+	int ret;
-+	asm volatile("vsysc2: syscall"
-+		: "=a" (ret)
-+		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
-+	return ret;
-+}
-+
-+static __always_inline long time_syscall(long *t)
-+{
-+	long secs;
-+	asm volatile("vsysc1: syscall"
-+		: "=a" (secs)
-+		: "0" (__NR_time),"D" (t) : __syscall_clobber);
-+	return secs;
-+}
-+
-+int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
-+{
-+	if (!__sysctl_vsyscall)
-+		return gettimeofday(tv,tz);
-+	if (tv)
-+		do_vgettimeofday(tv);
-+	if (tz)
-+		do_get_tz(tz);
-+	return 0;
-+}
-+
-+/* This will break when the xtime seconds get inaccurate, but that is
-+ * unlikely */
-+time_t __vsyscall(1) vtime(time_t *t)
-+{
-+	if (!__sysctl_vsyscall)
-+		return time_syscall(t);
-+	else if (t)
-+		*t = __xtime.tv_sec;		
-+	return __xtime.tv_sec;
-+}
-+
-+long __vsyscall(2) venosys_0(void)
-+{
-+	return -ENOSYS;
-+}
-+
-+long __vsyscall(3) venosys_1(void)
-+{
-+	return -ENOSYS;
-+}
-+
-+#ifdef CONFIG_SYSCTL
-+
-+#define SYSCALL 0x050f
-+#define NOP2    0x9090
-+
-+/*
-+ * NOP out syscall in vsyscall page when not needed.
-+ */
-+static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
-+                        void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+	extern u16 vsysc1, vsysc2;
-+	u16 *map1, *map2;
-+	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
-+	if (!write)
-+		return ret;
-+	/* gcc has some trouble with __va(__pa()), so just do it this
-+	   way. */
-+	map1 = ioremap(__pa_symbol(&vsysc1), 2);
-+	if (!map1)
-+		return -ENOMEM;
-+	map2 = ioremap(__pa_symbol(&vsysc2), 2);
-+	if (!map2) {
-+		ret = -ENOMEM;
-+		goto out;
-+	}
-+	if (!sysctl_vsyscall) {
-+		*map1 = SYSCALL;
-+		*map2 = SYSCALL;
-+	} else {
-+		*map1 = NOP2;
-+		*map2 = NOP2;
-+	}
-+	iounmap(map2);
-+out:
-+	iounmap(map1);
-+	return ret;
-+}
-+
-+static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
-+				void __user *oldval, size_t __user *oldlenp,
-+				void __user *newval, size_t newlen,
-+				void **context)
-+{
-+	return -ENOSYS;
-+}
-+
-+static ctl_table kernel_table2[] = {
-+	{ .ctl_name = 99, .procname = "vsyscall64",
-+	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
-+	  .strategy = vsyscall_sysctl_nostrat,
-+	  .proc_handler = vsyscall_sysctl_change },
-+	{ 0, }
-+};
-+
-+static ctl_table kernel_root_table2[] = {
-+	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
-+	  .child = kernel_table2 },
-+	{ 0 },
-+};
-+
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
+--- a/arch/x86_64/kernel/vsyscall.c	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/kernel/vsyscall.c	2007-03-14 10:55:21.000000000 +0100
+@@ -246,6 +246,7 @@
+ 
+ #endif
+ 
++#ifndef CONFIG_XEN
+ /* Assume __initcall executes before all user space. Hopefully kmod
+    doesn't violate that. We'll find out if it does. */
+ static void __cpuinit vsyscall_set_cpu(int cpu)
+@@ -282,6 +283,7 @@
+ 		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+ 	return NOTIFY_DONE;
+ }
 +#endif
-+
-+static void __init map_vsyscall(void)
-+{
-+	extern char __vsyscall_0;
-+	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
-+
-+	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
-+}
-+
+ 
+ static void __init map_vsyscall(void)
+ {
+@@ -292,6 +294,17 @@
+ 	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
+ }
+ 
 +#ifdef CONFIG_XEN
 +static void __init map_vsyscall_user(void)
 +{
@@ -41431,27 +36509,30 @@
 +}
 +#endif
 +
-+static int __init vsyscall_init(void)
-+{
-+	BUG_ON(((unsigned long) &vgettimeofday !=
-+			VSYSCALL_ADDR(__NR_vgettimeofday)));
-+	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
-+	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
-+	map_vsyscall();
+ static int __init vsyscall_init(void)
+ {
+ 	BUG_ON(((unsigned long) &vgettimeofday !=
+@@ -300,11 +313,17 @@
+ 	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
+ 	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
+ 	map_vsyscall();
 +#ifdef CONFIG_XEN
 +	map_vsyscall_user();
 +	sysctl_vsyscall = 0; /* disable vgettimeofay() */
 +#endif
-+#ifdef CONFIG_SYSCTL
-+	register_sysctl_table(kernel_root_table2, 0);
+ #ifdef CONFIG_SYSCTL
+ 	register_sysctl_table(kernel_root_table2, 0);
+ #endif
++#ifndef CONFIG_XEN
+ 	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
+ 	hotcpu_notifier(cpu_vsyscall_notifier, 0);
 +#endif
-+	return 0;
-+}
-+
-+__initcall(vsyscall_init);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/kernel/xen_entry.S linux-2.6.18-xen/arch/x86_64/kernel/xen_entry.S
---- linux-2.6.18.3/arch/x86_64/kernel/xen_entry.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/kernel/xen_entry.S	2006-11-19 14:26:34.000000000 +0100
+ 	return 0;
+ }
+ 
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/kernel/xen_entry.S b/arch/x86_64/kernel/xen_entry.S
+--- a/arch/x86_64/kernel/xen_entry.S	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/kernel/xen_entry.S	2007-03-14 10:55:21.000000000 +0100
 @@ -0,0 +1,40 @@
 +/*
 + * Copied from arch/xen/i386/kernel/entry.S
@@ -41493,11 +36574,11 @@
 +VGCF_IN_SYSCALL = (1<<8)
 +        
 +	
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/Makefile linux-2.6.18-xen/arch/x86_64/Makefile
---- linux-2.6.18.3/arch/x86_64/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/x86_64/Makefile	2006-11-19 14:26:33.000000000 +0100
-@@ -32,6 +32,10 @@
- cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/Makefile b/arch/x86_64/Makefile
+--- a/arch/x86_64/Makefile	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/Makefile	2007-03-14 10:55:21.000000000 +0100
+@@ -36,6 +36,10 @@
+ 	$(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
  cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
  
 +cppflags-$(CONFIG_XEN) += \
@@ -41507,13 +36588,13 @@
  cflags-y += -m64
  cflags-y += -mno-red-zone
  cflags-y += -mcmodel=kernel
-@@ -74,6 +78,21 @@
+@@ -90,6 +94,21 @@
  PHONY += bzImage bzlilo install archmrproper \
  	 fdimage fdimage144 fdimage288 isoimage archclean
  
 +ifdef CONFIG_XEN
 +CPPFLAGS := -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
-+head-y := arch/x86_64/kernel/head-xen.o arch/x86_64/kernel/head64-xen.o arch/x86_64/kernel/init_task.o
++head-y := arch/x86_64/kernel/head-xen.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o
 +LDFLAGS_vmlinux := -e _start
 +boot := arch/i386/boot-xen
 +.PHONY: vmlinuz
@@ -41529,7 +36610,7 @@
  #Default target when executing "make"
  all: bzImage
  
-@@ -94,6 +113,7 @@
+@@ -110,6 +129,7 @@
  
  install:
  	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ 
@@ -41537,10 +36618,10 @@
  
  archclean:
  	$(Q)$(MAKE) $(clean)=$(boot)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/mm/fault-xen.c linux-2.6.18-xen/arch/x86_64/mm/fault-xen.c
---- linux-2.6.18.3/arch/x86_64/mm/fault-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/mm/fault-xen.c	2006-11-19 14:26:34.000000000 +0100
-@@ -0,0 +1,723 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/mm/fault-xen.c b/arch/x86_64/mm/fault-xen.c
+--- a/arch/x86_64/mm/fault-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/mm/fault-xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,717 @@
 +/*
 + *  linux/arch/x86-64/mm/fault.c
 + *
@@ -41566,9 +36647,9 @@
 +#include <linux/compiler.h>
 +#include <linux/module.h>
 +#include <linux/kprobes.h>
++#include <linux/uaccess.h>
 +
 +#include <asm/system.h>
-+#include <asm/uaccess.h>
 +#include <asm/pgalloc.h>
 +#include <asm/smp.h>
 +#include <asm/tlbflush.h>
@@ -41583,8 +36664,7 @@
 +#define PF_RSVD	(1<<3)
 +#define PF_INSTR	(1<<4)
 +
-+#ifdef CONFIG_KPROBES
-+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
 +
 +/* Hook to register for page fault notifications */
 +int register_page_fault_notifier(struct notifier_block *nb)
@@ -41592,11 +36672,13 @@
 +	vmalloc_sync_all();
 +	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
 +}
++EXPORT_SYMBOL_GPL(register_page_fault_notifier);
 +
 +int unregister_page_fault_notifier(struct notifier_block *nb)
 +{
 +	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
 +}
++EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
 +
 +static inline int notify_page_fault(enum die_val val, const char *str,
 +			struct pt_regs *regs, long err, int trap, int sig)
@@ -41610,13 +36692,6 @@
 +	};
 +	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
 +}
-+#else
-+static inline int notify_page_fault(enum die_val val, const char *str,
-+			struct pt_regs *regs, long err, int trap, int sig)
-+{
-+	return NOTIFY_DONE;
-+}
-+#endif
 +
 +void bust_spinlocks(int yes)
 +{
@@ -41654,7 +36729,7 @@
 +	if (error_code & PF_INSTR)
 +		return 0;
 +	
-+	instr = (unsigned char *)convert_rip_to_linear(current, regs);
++	instr = (unsigned char __user *)convert_rip_to_linear(current, regs);
 +	max_instr = instr + 15;
 +
 +	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
@@ -41665,7 +36740,7 @@
 +		unsigned char instr_hi;
 +		unsigned char instr_lo;
 +
-+		if (__get_user(opcode, instr))
++		if (probe_kernel_address(instr, opcode))
 +			break; 
 +
 +		instr_hi = opcode & 0xf0; 
@@ -41703,7 +36778,7 @@
 +		case 0x00:
 +			/* Prefetch instruction is 0x0F0D or 0x0F18 */
 +			scan_more = 0;
-+			if (__get_user(opcode, instr)) 
++			if (probe_kernel_address(instr, opcode))
 +				break;
 +			prefetch = (instr_lo == 0xF) &&
 +				(opcode == 0x0D || opcode == 0x18);
@@ -41719,7 +36794,7 @@
 +static int bad_address(void *p) 
 +{ 
 +	unsigned long dummy;
-+	return __get_user(dummy, (unsigned long *)p);
++	return probe_kernel_address((unsigned long *)p, dummy);
 +} 
 +
 +void dump_pagetable(unsigned long address)
@@ -41791,7 +36866,7 @@
 +
 +int unhandled_signal(struct task_struct *tsk, int sig)
 +{
-+	if (tsk->pid == 1)
++	if (is_init(tsk))
 +		return 1;
 +	if (tsk->ptrace & PT_PTRACED)
 +		return 0;
@@ -41843,7 +36918,7 @@
 +	if (pgd_none(*pgd))
 +		set_pgd(pgd, *pgd_ref);
 +	else
-+		BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 +
 +	/* Below here mismatches are bugs because these lower tables
 +	   are shared */
@@ -41852,7 +36927,7 @@
 +	pud_ref = pud_offset(pgd_ref, address);
 +	if (pud_none(*pud_ref))
 +		return -1;
-+	if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
++	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
 +		BUG();
 +	pmd = pmd_offset(pud, address);
 +	pmd_ref = pmd_offset(pud_ref, address);
@@ -42073,7 +37148,7 @@
 +		case PF_PROT:		/* read, present */
 +			goto bad_area;
 +		case 0:			/* read, not present */
-+			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
 +				goto bad_area;
 +	}
 +
@@ -42189,7 +37264,7 @@
 + */
 +out_of_memory:
 +	up_read(&mm->mmap_sem);
-+	if (current->pid == 1) { 
++	if (is_init(current)) {
 +		yield();
 +		goto again;
 +	}
@@ -42244,7 +37319,7 @@
 +				if (pgd_none(*pgd))
 +					set_pgd(pgd, *pgd_ref);
 +				else
-+					BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 +			}
 +			spin_unlock(&pgd_lock);
 +			set_bit(pgd_index(address), insync);
@@ -42264,10 +37339,10 @@
 +	return 1;
 +}
 +__setup("pagefaulttrace", enable_pagefaulttrace);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/mm/init-xen.c linux-2.6.18-xen/arch/x86_64/mm/init-xen.c
---- linux-2.6.18.3/arch/x86_64/mm/init-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/mm/init-xen.c	2006-11-19 14:26:34.000000000 +0100
-@@ -0,0 +1,1200 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/mm/init-xen.c b/arch/x86_64/mm/init-xen.c
+--- a/arch/x86_64/mm/init-xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/arch/x86_64/mm/init-xen.c	2007-03-14 10:55:21.000000000 +0100
+@@ -0,0 +1,1148 @@
 +/*
 + *  linux/arch/x86_64/mm/init.c
 + *
@@ -42668,7 +37743,6 @@
 +
 +	if (after_bootmem) {
 +		void *adr = (void *)get_zeroed_page(GFP_ATOMIC);
-+
 +		*phys = __pa(adr);
 +		return adr;
 +	}
@@ -42720,7 +37794,6 @@
 +
 +	/* actually usually some more */
 +	if (size >= LARGE_PAGE_SIZE) { 
-+		printk("SMBIOS area too long %lu\n", size);
 +		return NULL;
 +	}
 +	set_pmd(temp_mappings[0].pmd,  __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
@@ -42742,13 +37815,15 @@
 +#endif /* !CONFIG_XEN */
 +
 +static void __meminit
-+phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
++phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 +{
-+	int i, k;
++	int i = pmd_index(address);
++	int k;
 +
-+	for (i = 0; i < PTRS_PER_PMD; pmd++, i++) {
++	for (; i < PTRS_PER_PMD; i++) {
 +		unsigned long pte_phys;
 +		pte_t *pte, *pte_save;
++		pmd_t *pmd = pmd_page + pmd_index(address);
 +
 +		if (address >= end) {
 +			if (!after_bootmem)
@@ -42756,6 +37831,10 @@
 +					set_pmd(pmd, __pmd(0));
 +			break;
 +		}
++
++		if (pmd_val(*pmd))
++			continue;
++
 +		pte = alloc_static_page(&pte_phys);
 +		pte_save = pte;
 +		for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
@@ -42781,40 +37860,36 @@
 +static void __meminit
 +phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
 +{
-+	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
-+
-+	if (pmd_none(*pmd)) {
-+		spin_lock(&init_mm.page_table_lock);
-+		phys_pmd_init(pmd, address, end);
-+		spin_unlock(&init_mm.page_table_lock);
-+		__flush_tlb_all();
-+	}
++	pmd_t *pmd = pmd_offset(pud,0);
++	spin_lock(&init_mm.page_table_lock);
++	phys_pmd_init(pmd, address, end);
++	spin_unlock(&init_mm.page_table_lock);
++	__flush_tlb_all();
 +}
 +
-+static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
++static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 +{ 
-+	long i = pud_index(address);
-+
-+	pud = pud + i;
++	int i = pud_index(addr);
 +
-+	if (after_bootmem && pud_val(*pud)) {
-+		phys_pmd_update(pud, address, end);
-+		return;
-+	}
 +
-+	for (; i < PTRS_PER_PUD; pud++, i++) {
-+		unsigned long paddr, pmd_phys;
++	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
++		unsigned long pmd_phys;
++		pud_t *pud = pud_page + pud_index(addr);
 +		pmd_t *pmd;
 +
-+		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
-+		if (paddr >= end)
++		if (addr >= end)
 +			break;
 +
++		if (pud_val(*pud)) {
++			phys_pmd_update(pud, addr, end);
++			continue;
++		}
++
 +		pmd = alloc_static_page(&pmd_phys);
 +		early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
 +		spin_lock(&init_mm.page_table_lock);
 +		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-+		phys_pmd_init(pmd, paddr, end);
++		phys_pmd_init(pmd, addr, end);
 +		spin_unlock(&init_mm.page_table_lock);
 +	}
 +	__flush_tlb();
@@ -43018,78 +38093,19 @@
 +#endif
 +}
 +
-+/* Compute zone sizes for the DMA and DMA32 zones in a node. */
-+__init void
-+size_zones(unsigned long *z, unsigned long *h,
-+	   unsigned long start_pfn, unsigned long end_pfn)
-+{
-+ 	int i;
-+#ifndef CONFIG_XEN
-+ 	unsigned long w;
-+#endif
-+
-+ 	for (i = 0; i < MAX_NR_ZONES; i++)
-+ 		z[i] = 0;
-+
-+#ifndef CONFIG_XEN
-+ 	if (start_pfn < MAX_DMA_PFN)
-+ 		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
-+ 	if (start_pfn < MAX_DMA32_PFN) {
-+ 		unsigned long dma32_pfn = MAX_DMA32_PFN;
-+ 		if (dma32_pfn > end_pfn)
-+ 			dma32_pfn = end_pfn;
-+ 		z[ZONE_DMA32] = dma32_pfn - start_pfn;
-+ 	}
-+ 	z[ZONE_NORMAL] = end_pfn - start_pfn;
-+
-+ 	/* Remove lower zones from higher ones. */
-+ 	w = 0;
-+ 	for (i = 0; i < MAX_NR_ZONES; i++) {
-+ 		if (z[i])
-+ 			z[i] -= w;
-+ 	        w += z[i];
-+	}
-+
-+	/* Compute holes */
-+	w = start_pfn;
-+	for (i = 0; i < MAX_NR_ZONES; i++) {
-+		unsigned long s = w;
-+		w += z[i];
-+		h[i] = e820_hole_size(s, w);
-+	}
-+
-+	/* Add the space needed for mem_map to the holes too. */
-+	for (i = 0; i < MAX_NR_ZONES; i++)
-+		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
-+
-+	/* The 16MB DMA zone has the kernel and other misc mappings.
-+ 	   Account them too */
-+	if (h[ZONE_DMA]) {
-+		h[ZONE_DMA] += dma_reserve;
-+		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
-+			printk(KERN_WARNING
-+				"Kernel too large and filling up ZONE_DMA?\n");
-+			h[ZONE_DMA] = z[ZONE_DMA];
-+		}
-+	}
-+#else
-+	z[ZONE_DMA] = end_pfn;
-+ 	for (i = 0; i < MAX_NR_ZONES; i++)
-+ 		h[i] = 0;
-+#endif
-+}
-+
 +#ifndef CONFIG_NUMA
 +void __init paging_init(void)
 +{
-+	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
 +	int i;
++	unsigned long max_zone_pfns[MAX_NR_ZONES];
++	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
++	max_zone_pfns[ZONE_DMA] = end_pfn;
++	max_zone_pfns[ZONE_DMA32] = end_pfn;
++	max_zone_pfns[ZONE_NORMAL] = end_pfn;
 +
 +	memory_present(0, 0, end_pfn);
 +	sparse_init();
-+	size_zones(zones, holes, 0, end_pfn);
-+	free_area_init_node(0, NODE_DATA(0), zones,
-+			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
++	free_area_init_nodes(max_zone_pfns);
 +
 +	/* Switch to the real shared_info page, and clear the
 +	 * dummy page. */
@@ -43147,7 +38163,6 @@
 +/*
 + * Memory hotplug specific functions
 + */
-+
 +void online_page(struct page *page)
 +{
 +	ClearPageReserved(page);
@@ -43159,36 +38174,23 @@
 +
 +#ifdef CONFIG_MEMORY_HOTPLUG
 +/*
-+ * XXX: memory_add_physaddr_to_nid() is to find node id from physical address
-+ *	via probe interface of sysfs. If acpi notifies hot-add event, then it
-+ *	can tell node id by searching dsdt. But, probe interface doesn't have
-+ *	node id. So, return 0 as node id at this time.
-+ */
-+#ifdef CONFIG_NUMA
-+int memory_add_physaddr_to_nid(u64 start)
-+{
-+	return 0;
-+}
-+#endif
-+
-+/*
 + * Memory is added always to NORMAL zone. This means you will never get
 + * additional DMA/DMA32 memory.
 + */
 +int arch_add_memory(int nid, u64 start, u64 size)
 +{
 +	struct pglist_data *pgdat = NODE_DATA(nid);
-+	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
++	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 +	unsigned long start_pfn = start >> PAGE_SHIFT;
 +	unsigned long nr_pages = size >> PAGE_SHIFT;
 +	int ret;
 +
++	init_memory_mapping(start, (start + size -1));
++
 +	ret = __add_pages(zone, start_pfn, nr_pages);
 +	if (ret)
 +		goto error;
 +
-+	init_memory_mapping(start, (start + size -1));
-+
 +	return ret;
 +error:
 +	printk("%s: Problem encountered in __add_pages!\n", __func__);
@@ -43202,7 +38204,17 @@
 +}
 +EXPORT_SYMBOL_GPL(remove_memory);
 +
-+#else /* CONFIG_MEMORY_HOTPLUG */
++#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
++int memory_add_physaddr_to_nid(u64 start)
++{
++	return 0;
++}
++EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
++#endif
++
++#endif /* CONFIG_MEMORY_HOTPLUG */
++
++#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
 +/*
 + * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
 + * just online the pages.
@@ -43228,7 +38240,7 @@
 +	}
 +	return err;
 +}
-+#endif /* CONFIG_MEMORY_HOTPLUG */
++#endif
 +
 +static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
 +			 kcore_vsyscall;
@@ -43245,12 +38257,6 @@
 +
 +	pci_iommu_alloc();
 +
-+	/* How many end-of-memory variables you have, grandma! */
-+	max_low_pfn = end_pfn;
-+	max_pfn = end_pfn;
-+	num_physpages = end_pfn;
-+	high_memory = (void *) __va(end_pfn * PAGE_SIZE);
-+
 +	/* clear the zero-page */
 +	memset(empty_zero_page, 0, PAGE_SIZE);
 +
@@ -43268,7 +38274,9 @@
 +		init_page_count(&mem_map[pfn]);
 +		totalram_pages++;
 +	}
-+	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
++	reservedpages = end_pfn - totalram_pages -
++					absent_pages_in_range(0, end_pfn);
++
 +
 +	after_bootmem = 1;
 +
@@ -43307,7 +38315,6 @@
 +
 +void free_init_pages(char *what, unsigned long begin, unsigned long end)
 +{
-+#ifdef __DO_LATER__
 +	unsigned long addr;
 +
 +	if (begin >= end)
@@ -43322,14 +38329,13 @@
 +		free_page(addr);
 +		totalram_pages++;
 +	}
-+#endif
 +}
 +
 +void free_initmem(void)
 +{
-+#ifdef __DO_LATER__
 +	memset(__initdata_begin, POISON_FREE_INITDATA,
 +	       __initdata_end - __initdata_begin);
++#ifdef __DO_LATER__
 +	free_init_pages("unused kernel memory",
 +			(unsigned long)(&__init_begin),
 +			(unsigned long)(&__init_end));
@@ -43347,6 +38353,7 @@
 +
 +	printk ("Write protecting the kernel read-only data: %luk\n",
 +			(__end_rodata - __start_rodata) >> 10);
++
 +	/*
 +	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
 +	 * We do this after the printk so that if something went wrong in the
@@ -43366,15 +38373,30 @@
 +
 +void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 
 +{ 
-+	/* Should check here against the e820 map to avoid double free */ 
 +#ifdef CONFIG_NUMA
 +	int nid = phys_to_nid(phys);
++#endif
++	unsigned long pfn = phys >> PAGE_SHIFT;
++	if (pfn >= end_pfn) {
++		/* This can happen with kdump kernels when accessing firmware
++		   tables. */
++		if (pfn < end_pfn_map)
++			return;
++		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
++				phys, len);
++		return;
++	}
++
++	/* Should check here against the e820 map to avoid double free */
++#ifdef CONFIG_NUMA
 +  	reserve_bootmem_node(NODE_DATA(nid), phys, len);
 +#else       		
 +	reserve_bootmem(phys, len);    
 +#endif
-+	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
++	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
 +		dma_reserve += len / PAGE_SIZE;
++		set_dma_reserve(dma_reserve);
++	}
 +}
 +
 +int kern_addr_valid(unsigned long addr) 
@@ -43433,14 +38455,15 @@
 +__initcall(x8664_sysctl_init);
 +#endif
 +
-+/* A pseudo VMAs to allow ptrace access for the vsyscall page.   This only
++/* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 +   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 +   not need special handling anymore. */
 +
 +static struct vm_area_struct gate_vma = {
 +	.vm_start = VSYSCALL_START,
-+	.vm_end = VSYSCALL_END,
-+	.vm_page_prot = PAGE_READONLY
++	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
++	.vm_page_prot = PAGE_READONLY_EXEC,
++	.vm_flags = VM_READ | VM_EXEC
 +};
 +
 +struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
@@ -43468,9 +38491,9 @@
 +{
 +	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/mm/Makefile linux-2.6.18-xen/arch/x86_64/mm/Makefile
---- linux-2.6.18.3/arch/x86_64/mm/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/x86_64/mm/Makefile	2006-11-19 14:26:34.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/mm/Makefile b/arch/x86_64/mm/Makefile
+--- a/arch/x86_64/mm/Makefile	2007-03-12 21:58:06.000000000 +0100
++++ b/arch/x86_64/mm/Makefile	2007-03-14 10:55:21.000000000 +0100
 @@ -9,3 +9,13 @@
  obj-$(CONFIG_ACPI_NUMA) += srat.o
  
@@ -43485,25 +38508,13 @@
 +
 +obj-y := $(call cherrypickxen, $(obj-y))
 +endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/mm/pageattr-xen.c linux-2.6.18-xen/arch/x86_64/mm/pageattr-xen.c
---- linux-2.6.18.3/arch/x86_64/mm/pageattr-xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/arch/x86_64/mm/pageattr-xen.c	2006-12-05 18:42:36.000000000 +0100
-@@ -0,0 +1,398 @@
-+/* 
-+ * Copyright 2002 Andi Kleen, SuSE Labs. 
-+ * Thanks to Ben LaHaise for precious feedback.
-+ */ 
-+
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+#include <asm/tlbflush.h>
-+#include <asm/io.h>
-+
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
+--- a/arch/x86_64/mm/pageattr.c	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/mm/pageattr.c	2007-03-14 10:55:21.000000000 +0100
+@@ -13,6 +13,167 @@
+ #include <asm/tlbflush.h>
+ #include <asm/io.h>
+ 
 +#ifdef CONFIG_XEN
 +#include <asm/pgalloc.h>
 +#include <asm/mmu_context.h>
@@ -43665,145 +38676,13 @@
 +}
 +#endif	/* CONFIG_XEN */
 +
-+static inline pte_t *lookup_address(unsigned long address) 
-+{ 
-+	pgd_t *pgd = pgd_offset_k(address);
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	if (pgd_none(*pgd))
-+		return NULL;
-+	pud = pud_offset(pgd, address);
-+	if (!pud_present(*pud))
-+		return NULL; 
-+	pmd = pmd_offset(pud, address);
-+	if (!pmd_present(*pmd))
-+		return NULL; 
-+	if (pmd_large(*pmd))
-+		return (pte_t *)pmd;
-+	pte = pte_offset_kernel(pmd, address);
-+	if (pte && !pte_present(*pte))
-+		pte = NULL; 
-+	return pte;
-+} 
-+
-+static struct page *split_large_page(unsigned long address, pgprot_t prot,
-+				     pgprot_t ref_prot)
-+{ 
-+	int i; 
-+	unsigned long addr;
-+	struct page *base = alloc_pages(GFP_KERNEL, 0);
-+	pte_t *pbase;
-+	if (!base) 
-+		return NULL;
-+	/*
-+	 * page_private is used to track the number of entries in
-+	 * the page table page have non standard attributes.
-+	 */
-+	SetPagePrivate(base);
-+	page_private(base) = 0;
-+
-+	address = __pa(address);
-+	addr = address & LARGE_PAGE_MASK; 
-+	pbase = (pte_t *)page_address(base);
-+	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
-+		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
-+				   addr == address ? prot : ref_prot);
-+	}
-+	return base;
-+} 
-+
-+
-+static void flush_kernel_map(void *address) 
-+{
-+	if (0 && address && cpu_has_clflush) {
-+		/* is this worth it? */ 
-+		int i;
-+		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) 
-+			asm volatile("clflush (%0)" :: "r" (address + i)); 
-+	} else
-+		asm volatile("wbinvd":::"memory"); 
-+	if (address)
-+		__flush_tlb_one(address);
-+	else
-+		__flush_tlb_all();
-+}
-+
-+
-+static inline void flush_map(unsigned long address)
-+{	
-+	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
-+}
-+
-+static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
-+
-+static inline void save_page(struct page *fpage)
-+{
-+	fpage->lru.next = (struct list_head *)deferred_pages;
-+	deferred_pages = fpage;
-+}
-+
-+/* 
-+ * No more special protections in this 2/4MB area - revert to a
-+ * large page again. 
-+ */
-+static void revert_page(unsigned long address, pgprot_t ref_prot)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t large_pte;
-+
-+	pgd = pgd_offset_k(address);
-+	BUG_ON(pgd_none(*pgd));
-+	pud = pud_offset(pgd,address);
-+	BUG_ON(pud_none(*pud));
-+	pmd = pmd_offset(pud, address);
-+	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
-+	pgprot_val(ref_prot) |= _PAGE_PSE;
-+	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
-+	set_pte((pte_t *)pmd, large_pte);
-+}      
-+
-+static int
-+__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
-+				   pgprot_t ref_prot)
-+{ 
-+	pte_t *kpte; 
-+	struct page *kpte_page;
-+	unsigned kpte_flags;
-+	pgprot_t ref_prot2;
-+	kpte = lookup_address(address);
-+	if (!kpte) return 0;
-+	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
-+	kpte_flags = pte_val(*kpte); 
-+	if (pgprot_val(prot) != pgprot_val(ref_prot)) { 
-+		if ((kpte_flags & _PAGE_PSE) == 0) { 
-+			set_pte(kpte, pfn_pte(pfn, prot));
-+		} else {
-+ 			/*
-+			 * split_large_page will take the reference for this
-+			 * change_page_attr on the split page.
-+ 			 */
-+
-+			struct page *split;
-+			ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
-+
-+			split = split_large_page(address, prot, ref_prot2);
-+			if (!split)
-+				return -ENOMEM;
-+			set_pte(kpte,mk_pte(split, ref_prot2));
-+			kpte_page = split;
-+		}	
-+		page_private(kpte_page)++;
-+	} else if ((kpte_flags & _PAGE_PSE) == 0) { 
-+		set_pte(kpte, pfn_pte(pfn, ref_prot));
-+		BUG_ON(page_private(kpte_page) == 0);
-+		page_private(kpte_page)--;
-+	} else
-+		BUG();
-+
-+	/* on x86-64 the direct mapping set at boot is not using 4k pages */
+ static inline pte_t *lookup_address(unsigned long address) 
+ { 
+ 	pgd_t *pgd = pgd_offset_k(address);
+@@ -154,7 +315,17 @@
+ 		BUG();
+ 
+ 	/* on x86-64 the direct mapping set at boot is not using 4k pages */
 +	/*
 +	 * ..., but the XEN guest kernels (currently) do:
 +	 * If the pte was reserved, it means it was created at boot
@@ -43811,85 +38690,16 @@
 +	 * replace it with a large page.
 +	 */
 +#ifndef CONFIG_XEN
-+ 	BUG_ON(PageReserved(kpte_page));
+  	BUG_ON(PageReserved(kpte_page));
 +#else
 +	if (!PageReserved(kpte_page))
 +#endif
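 +	/*
 +	 * Background note: the Xen boot-time mappings are 4k-only, so a
 +	 * reserved kpte_page has no large page to revert to; the
 +	 * save/revert below is therefore skipped for reserved pages.
 +	 */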
-+		if (page_private(kpte_page) == 0) {
-+			save_page(kpte_page);
-+			revert_page(address, ref_prot);
-+		}
-+	return 0;
-+} 
-+
-+/*
-+ * Change the page attributes of an page in the linear mapping.
-+ *
-+ * This should be used when a page is mapped with a different caching policy
-+ * than write-back somewhere - some CPUs do not like it when mappings with
-+ * different caching policies exist. This changes the page attributes of the
-+ * in kernel linear mapping too.
-+ * 
-+ * The caller needs to ensure that there are no conflicting mappings elsewhere.
-+ * This function only deals with the kernel linear map.
-+ * 
-+ * Caller must call global_flush_tlb() after this.
-+ */
-+int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
-+{
-+	int err = 0; 
-+	int i; 
-+
-+	down_write(&init_mm.mmap_sem);
-+	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
-+		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
-+
-+		err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
-+		if (err) 
-+			break; 
-+		/* Handle kernel mapping too which aliases part of the
-+		 * lowmem */
-+		if (__pa(address) < KERNEL_TEXT_SIZE) {
-+			unsigned long addr2;
-+			pgprot_t prot2 = prot;
-+			addr2 = __START_KERNEL_map + __pa(address);
-+ 			pgprot_val(prot2) &= ~_PAGE_NX;
-+			err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
-+		} 
-+	} 	
-+	up_write(&init_mm.mmap_sem); 
-+	return err;
-+}
-+
-+/* Don't call this for MMIO areas that may not have a mem_map entry */
-+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-+{
-+	unsigned long addr = (unsigned long)page_address(page);
-+	return change_page_attr_addr(addr, numpages, prot);
-+}
-+
-+void global_flush_tlb(void)
-+{ 
-+	struct page *dpage;
-+
-+	down_read(&init_mm.mmap_sem);
-+	dpage = xchg(&deferred_pages, NULL);
-+	up_read(&init_mm.mmap_sem);
-+
-+	flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
-+	while (dpage) {
-+		struct page *tmp = dpage;
-+		dpage = (struct page *)dpage->lru.next;
-+		ClearPagePrivate(tmp);
-+		__free_page(tmp);
-+	} 
-+} 
-+
-+EXPORT_SYMBOL(change_page_attr);
-+EXPORT_SYMBOL(global_flush_tlb);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/oprofile/Makefile linux-2.6.18-xen/arch/x86_64/oprofile/Makefile
---- linux-2.6.18.3/arch/x86_64/oprofile/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/x86_64/oprofile/Makefile	2006-11-19 14:26:34.000000000 +0100
+ 
+ 	if (page_private(kpte_page) == 0) {
+ 		save_page(kpte_page);
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/oprofile/Makefile b/arch/x86_64/oprofile/Makefile
+--- a/arch/x86_64/oprofile/Makefile	2007-03-12 21:58:06.000000000 +0100
++++ b/arch/x86_64/oprofile/Makefile	2007-03-14 10:55:21.000000000 +0100
 @@ -11,9 +11,12 @@
  	oprofilefs.o oprofile_stats.o \
  	timer_int.o )
@@ -43904,10 +38714,10 @@
 -
 +endif
  oprofile-y = $(DRIVER_OBJS) $(addprefix ../../i386/oprofile/, $(OPROFILE-y))
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/arch/x86_64/pci/Makefile linux-2.6.18-xen/arch/x86_64/pci/Makefile
---- linux-2.6.18.3/arch/x86_64/pci/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/arch/x86_64/pci/Makefile	2006-11-19 14:26:34.000000000 +0100
-@@ -15,11 +15,23 @@
+diff -x '.hg*' -x '.git*' -urN a/arch/x86_64/pci/Makefile b/arch/x86_64/pci/Makefile
+--- a/arch/x86_64/pci/Makefile	2007-03-16 18:49:21.000000000 +0100
++++ b/arch/x86_64/pci/Makefile	2007-03-14 10:55:21.000000000 +0100
+@@ -15,8 +15,13 @@
  
  obj-$(CONFIG_NUMA)	+= k8-bus.o
  
@@ -43921,19 +38731,18 @@
  legacy-y += ../../i386/pci/legacy.o
  irq-y    += ../../i386/pci/irq.o
  common-y += ../../i386/pci/common.o
- fixup-y  += ../../i386/pci/fixup.o
- i386-y  += ../../i386/pci/i386.o
- init-y += ../../i386/pci/init.o
-+
-+ifdef CONFIG_XEN
-+irq-y		:= ../../i386/pci/irq-xen.o
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/acpi/Kconfig linux-2.6.18-xen/drivers/acpi/Kconfig
---- linux-2.6.18.3/drivers/acpi/Kconfig	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/acpi/Kconfig	2006-11-19 14:26:34.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
+--- a/drivers/acpi/Kconfig	2007-03-16 18:49:21.000000000 +0100
++++ b/drivers/acpi/Kconfig	2007-03-14 10:55:22.000000000 +0100
+@@ -11,7 +11,7 @@
+ 	bool "ACPI Support"
+ 	depends on IA64 || X86
+ 	depends on PCI
+-	depends on PM
++	depends on PM || XEN
+ 	default y
+ 	---help---
+ 	  Advanced Configuration and Power Interface (ACPI) support for 
 @@ -45,7 +45,7 @@
  
  config ACPI_SLEEP
@@ -43943,7 +38752,7 @@
  	depends on PM
  	default y
  	---help---
-@@ -305,6 +305,7 @@
+@@ -309,6 +309,7 @@
  config X86_PM_TIMER
  	bool "Power Management Timer Support" if EMBEDDED
  	depends on X86
@@ -43951,9 +38760,9 @@
  	default y
  	help
  	  The Power Management Timer is available on all ACPI-capable,
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/char/hangcheck-timer.c linux-2.6.18-xen/drivers/char/hangcheck-timer.c
---- linux-2.6.18.3/drivers/char/hangcheck-timer.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/char/hangcheck-timer.c	2006-11-19 14:26:36.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
+--- a/drivers/char/hangcheck-timer.c	2007-03-16 18:49:21.000000000 +0100
++++ b/drivers/char/hangcheck-timer.c	2007-03-14 10:55:23.000000000 +0100
 @@ -117,7 +117,7 @@
  __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
  #endif /* not MODULE */
@@ -43963,10 +38772,10 @@
  # define HAVE_MONOTONIC
  # define TIMER_FREQ 1000000000ULL
  #elif defined(CONFIG_IA64)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/char/mem.c linux-2.6.18-xen/drivers/char/mem.c
---- linux-2.6.18.3/drivers/char/mem.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/char/mem.c	2006-11-19 14:26:36.000000000 +0100
-@@ -101,6 +101,7 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/char/mem.c b/drivers/char/mem.c
+--- a/drivers/char/mem.c	2007-03-16 18:49:21.000000000 +0100
++++ b/drivers/char/mem.c	2007-03-14 10:55:23.000000000 +0100
+@@ -102,6 +102,7 @@
  }
  #endif
  
@@ -43974,7 +38783,7 @@
  /*
   * This function reads the *physical* memory. The f_pos points directly to the 
   * memory location. 
-@@ -223,6 +224,7 @@
+@@ -224,6 +225,7 @@
  	*ppos += written;
  	return written;
  }
@@ -43982,7 +38791,7 @@
  
  #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
  static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-@@ -776,6 +778,7 @@
+@@ -810,6 +812,7 @@
  #define open_kmem	open_mem
  #define open_oldmem	open_mem
  
@@ -43990,9 +38799,9 @@
  static const struct file_operations mem_fops = {
  	.llseek		= memory_lseek,
  	.read		= read_mem,
-@@ -783,6 +786,9 @@
- 	.mmap		= mmap_mem,
+@@ -818,6 +821,9 @@
  	.open		= open_mem,
+ 	.get_unmapped_area = get_unmapped_area_mem,
  };
 +#else
 +extern struct file_operations mem_fops;
@@ -44000,9 +38809,9 @@
  
  static const struct file_operations kmem_fops = {
  	.llseek		= memory_lseek,
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/char/tpm/Kconfig linux-2.6.18-xen/drivers/char/tpm/Kconfig
---- linux-2.6.18.3/drivers/char/tpm/Kconfig	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/char/tpm/Kconfig	2006-11-19 14:26:36.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
+--- a/drivers/char/tpm/Kconfig	2007-03-12 21:58:07.000000000 +0100
++++ b/drivers/char/tpm/Kconfig	2007-03-14 10:55:24.000000000 +0100
 @@ -31,7 +31,7 @@
  
  config TCG_NSC
@@ -44027,18 +38836,18 @@
 +	  will be called tpm_xenu.
  
 +endmenu
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/char/tpm/Makefile linux-2.6.18-xen/drivers/char/tpm/Makefile
---- linux-2.6.18.3/drivers/char/tpm/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/char/tpm/Makefile	2006-11-19 14:26:36.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
+--- a/drivers/char/tpm/Makefile	2007-03-12 21:58:07.000000000 +0100
++++ b/drivers/char/tpm/Makefile	2007-03-14 10:55:24.000000000 +0100
 @@ -9,3 +9,5 @@
  obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
  obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
  obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
 +obj-$(CONFIG_TCG_XEN) += tpm_xenu.o
 +tpm_xenu-y = tpm_xen.o tpm_vtpm.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/char/tpm/tpm.c linux-2.6.18-xen/drivers/char/tpm/tpm.c
---- linux-2.6.18.3/drivers/char/tpm/tpm.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/char/tpm/tpm.c	2006-11-19 14:26:36.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+--- a/drivers/char/tpm/tpm.c	2007-03-16 18:49:21.000000000 +0100
++++ b/drivers/char/tpm/tpm.c	2007-03-14 10:55:24.000000000 +0100
 @@ -30,7 +30,9 @@
  
  enum tpm_const {
@@ -44159,10 +38968,10 @@
  	return ret_size;
  }
  EXPORT_SYMBOL_GPL(tpm_read);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/char/tpm/tpm.h linux-2.6.18-xen/drivers/char/tpm/tpm.h
---- linux-2.6.18.3/drivers/char/tpm/tpm.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/char/tpm/tpm.h	2006-12-05 18:42:36.000000000 +0100
-@@ -61,6 +61,7 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+--- a/drivers/char/tpm/tpm.h	2007-03-16 18:49:21.000000000 +0100
++++ b/drivers/char/tpm/tpm.h	2007-03-14 10:55:24.000000000 +0100
+@@ -62,6 +62,7 @@
  	const u8 req_complete_mask;
  	const u8 req_complete_val;
  	const u8 req_canceled;
@@ -44170,7 +38979,7 @@
  	void __iomem *iobase;		/* ioremapped address */
  	unsigned long base;		/* TPM base address */
  
-@@ -94,6 +95,7 @@
+@@ -95,6 +96,7 @@
  	/* Data passed to and from the tpm via the read/write calls */
  	u8 *data_buffer;
  	atomic_t data_pending;
@@ -44178,7 +38987,7 @@
  	struct semaphore buffer_mutex;
  
  	struct timer_list user_read_timer;	/* user needs to claim result */
-@@ -121,6 +123,11 @@
+@@ -122,6 +124,11 @@
  	outb(value & 0xFF, base+1);
  }
  
@@ -44190,9 +38999,9 @@
  extern void tpm_get_timeouts(struct tpm_chip *);
  extern void tpm_gen_interrupt(struct tpm_chip *);
  extern void tpm_continue_selftest(struct tpm_chip *);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/char/tpm/tpm_vtpm.c linux-2.6.18-xen/drivers/char/tpm/tpm_vtpm.c
---- linux-2.6.18.3/drivers/char/tpm/tpm_vtpm.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/char/tpm/tpm_vtpm.c	2006-11-19 14:26:36.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/char/tpm/tpm_vtpm.c b/drivers/char/tpm/tpm_vtpm.c
+--- a/drivers/char/tpm/tpm_vtpm.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/char/tpm/tpm_vtpm.c	2007-03-14 10:55:24.000000000 +0100
 @@ -0,0 +1,547 @@
 +/*
 + * Copyright (C) 2006 IBM Corporation
@@ -44741,9 +39550,9 @@
 +	tpm_remove_hardware(dev);
 +	kfree(vtpms);
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/char/tpm/tpm_vtpm.h linux-2.6.18-xen/drivers/char/tpm/tpm_vtpm.h
---- linux-2.6.18.3/drivers/char/tpm/tpm_vtpm.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/char/tpm/tpm_vtpm.h	2006-11-19 14:26:36.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/char/tpm/tpm_vtpm.h b/drivers/char/tpm/tpm_vtpm.h
+--- a/drivers/char/tpm/tpm_vtpm.h	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/char/tpm/tpm_vtpm.h	2007-03-14 10:55:24.000000000 +0100
 @@ -0,0 +1,68 @@
 +#ifndef TPM_VTPM_H
 +#define TPM_VTPM_H
@@ -44813,9 +39622,9 @@
 +}
 +
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/char/tpm/tpm_xen.c linux-2.6.18-xen/drivers/char/tpm/tpm_xen.c
---- linux-2.6.18.3/drivers/char/tpm/tpm_xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/char/tpm/tpm_xen.c	2006-11-19 14:26:36.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/char/tpm/tpm_xen.c b/drivers/char/tpm/tpm_xen.c
+--- a/drivers/char/tpm/tpm_xen.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/char/tpm/tpm_xen.c	2007-03-14 10:55:24.000000000 +0100
 @@ -0,0 +1,756 @@
 +/*
 + * Copyright (c) 2005, IBM Corporation
@@ -45573,47 +40382,20 @@
 +module_init(tpmif_init);
 +
 +MODULE_LICENSE("Dual BSD/GPL");
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/char/tty_io.c linux-2.6.18-xen/drivers/char/tty_io.c
---- linux-2.6.18.3/drivers/char/tty_io.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/char/tty_io.c	2006-11-19 14:26:36.000000000 +0100
-@@ -130,6 +130,8 @@
-    vt.c for deeply disgusting hack reasons */
- DEFINE_MUTEX(tty_mutex);
- 
-+int console_use_vt = 1;
-+
- #ifdef CONFIG_UNIX98_PTYS
- extern struct tty_driver *ptm_driver;	/* Unix98 pty masters; for /dev/ptmx */
- extern int pty_limit;		/* Config limit on Unix98 ptys */
-@@ -2483,7 +2485,7 @@
- 		goto got_driver;
- 	}
- #ifdef CONFIG_VT
--	if (device == MKDEV(TTY_MAJOR,0)) {
-+	if (console_use_vt && (device == MKDEV(TTY_MAJOR,0))) {
- 		extern struct tty_driver *console_driver;
- 		driver = console_driver;
- 		index = fg_console;
-@@ -3909,6 +3911,8 @@
- #endif
- 
- #ifdef CONFIG_VT
-+	if (!console_use_vt)
-+		goto out_vt;
- 	cdev_init(&vc0_cdev, &console_fops);
- 	if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
- 	    register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
-@@ -3916,6 +3920,7 @@
- 	class_device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
+diff -x '.hg*' -x '.git*' -urN a/drivers/char/tty_io.c b/drivers/char/tty_io.c
+--- a/drivers/char/tty_io.c	2007-03-16 18:49:21.000000000 +0100
++++ b/drivers/char/tty_io.c	2007-03-14 10:55:24.000000000 +0100
+@@ -3917,6 +3917,7 @@
+ 	device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), "tty0");
  
  	vty_init();
 + out_vt:
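 +	/* Background note: out_vt is reached when VT console setup is
 +	 * skipped (console_use_vt cleared for Xen guest consoles). */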
  #endif
  	return 0;
  }
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/firmware/Kconfig linux-2.6.18-xen/drivers/firmware/Kconfig
---- linux-2.6.18.3/drivers/firmware/Kconfig	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/firmware/Kconfig	2006-11-19 14:26:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+--- a/drivers/firmware/Kconfig	2007-03-16 18:49:22.000000000 +0100
++++ b/drivers/firmware/Kconfig	2007-03-14 10:55:24.000000000 +0100
 @@ -7,7 +7,7 @@
  
  config EDD
@@ -45623,10 +40405,10 @@
  	help
  	  Say Y or M here if you want to enable BIOS Enhanced Disk Drive
  	  Services real mode BIOS calls to determine which disk
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/ide/ide-lib.c linux-2.6.18-xen/drivers/ide/ide-lib.c
---- linux-2.6.18.3/drivers/ide/ide-lib.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/ide/ide-lib.c	2006-11-19 14:26:39.000000000 +0100
-@@ -408,10 +408,10 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
+--- a/drivers/ide/ide-lib.c	2007-03-16 18:49:22.000000000 +0100
++++ b/drivers/ide/ide-lib.c	2007-03-14 10:55:24.000000000 +0100
+@@ -429,10 +429,10 @@
  {
  	u64 addr = BLK_BOUNCE_HIGH;	/* dma64_addr_t */
  
@@ -45641,20 +40423,20 @@
  			addr = HWIF(drive)->pci_dev->dma_mask;
  	}
  
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/Makefile linux-2.6.18-xen/drivers/Makefile
---- linux-2.6.18.3/drivers/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/Makefile	2006-11-19 14:26:34.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/Makefile b/drivers/Makefile
+--- a/drivers/Makefile	2007-03-16 18:49:21.000000000 +0100
++++ b/drivers/Makefile	2007-03-14 10:55:22.000000000 +0100
 @@ -31,6 +31,7 @@
  obj-$(CONFIG_NUBUS)		+= nubus/
  obj-$(CONFIG_ATM)		+= atm/
- obj-$(CONFIG_PPC_PMAC)		+= macintosh/
+ obj-y				+= macintosh/
 +obj-$(CONFIG_XEN)		+= xen/
  obj-$(CONFIG_IDE)		+= ide/
  obj-$(CONFIG_FC4)		+= fc4/
  obj-$(CONFIG_SCSI)		+= scsi/
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/oprofile/buffer_sync.c linux-2.6.18-xen/drivers/oprofile/buffer_sync.c
---- linux-2.6.18.3/drivers/oprofile/buffer_sync.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/oprofile/buffer_sync.c	2006-11-19 14:26:47.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
+--- a/drivers/oprofile/buffer_sync.c	2007-03-16 18:49:24.000000000 +0100
++++ b/drivers/oprofile/buffer_sync.c	2007-03-14 10:55:31.000000000 +0100
 @@ -6,6 +6,10 @@
   *
   * @author John Levon <levon at movementarian.org>
@@ -45775,9 +40557,9 @@
  				}
  			}
  		}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/oprofile/cpu_buffer.c linux-2.6.18-xen/drivers/oprofile/cpu_buffer.c
---- linux-2.6.18.3/drivers/oprofile/cpu_buffer.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/oprofile/cpu_buffer.c	2006-11-19 14:26:47.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
+--- a/drivers/oprofile/cpu_buffer.c	2007-03-16 18:49:24.000000000 +0100
++++ b/drivers/oprofile/cpu_buffer.c	2007-03-14 10:55:31.000000000 +0100
 @@ -6,6 +6,10 @@
   *
   * @author John Levon <levon at movementarian.org>
@@ -45887,9 +40669,9 @@
  /*
   * This serves to avoid cpu buffer overflow, and makes sure
   * the task mortuary progresses
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/oprofile/cpu_buffer.h linux-2.6.18-xen/drivers/oprofile/cpu_buffer.h
---- linux-2.6.18.3/drivers/oprofile/cpu_buffer.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/oprofile/cpu_buffer.h	2006-11-19 14:26:47.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
+--- a/drivers/oprofile/cpu_buffer.h	2007-03-16 18:49:24.000000000 +0100
++++ b/drivers/oprofile/cpu_buffer.h	2007-03-14 10:55:31.000000000 +0100
 @@ -36,7 +36,7 @@
  	volatile unsigned long tail_pos;
  	unsigned long buffer_size;
@@ -45912,9 +40694,9 @@
 +#define CPU_DOMAIN_SWITCH       4
  
  #endif /* OPROFILE_CPU_BUFFER_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/oprofile/event_buffer.h linux-2.6.18-xen/drivers/oprofile/event_buffer.h
---- linux-2.6.18.3/drivers/oprofile/event_buffer.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/oprofile/event_buffer.h	2006-11-19 14:26:47.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
+--- a/drivers/oprofile/event_buffer.h	2007-03-15 15:56:07.000000000 +0100
++++ b/drivers/oprofile/event_buffer.h	2007-03-14 10:55:31.000000000 +0100
 @@ -29,15 +29,20 @@
  #define CPU_SWITCH_CODE 		2
  #define COOKIE_SWITCH_CODE 		3
@@ -45937,9 +40719,9 @@
  /* add data to the event buffer */
  void add_event_entry(unsigned long data);
   
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/oprofile/oprof.c linux-2.6.18-xen/drivers/oprofile/oprof.c
---- linux-2.6.18.3/drivers/oprofile/oprof.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/oprofile/oprof.c	2006-11-19 14:26:47.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
+--- a/drivers/oprofile/oprof.c	2007-03-12 21:58:09.000000000 +0100
++++ b/drivers/oprofile/oprof.c	2007-03-14 10:55:31.000000000 +0100
 @@ -5,6 +5,10 @@
   * @remark Read the file COPYING
   *
@@ -45993,9 +40775,9 @@
  int oprofile_setup(void)
  {
  	int err;
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/oprofile/oprof.h linux-2.6.18-xen/drivers/oprofile/oprof.h
---- linux-2.6.18.3/drivers/oprofile/oprof.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/oprofile/oprof.h	2006-11-19 14:26:47.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
+--- a/drivers/oprofile/oprof.h	2007-03-12 21:58:09.000000000 +0100
++++ b/drivers/oprofile/oprof.h	2007-03-14 10:55:31.000000000 +0100
 @@ -35,5 +35,8 @@
  void oprofile_timer_init(struct oprofile_operations * ops);
  
@@ -46005,9 +40787,9 @@
 +int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
   
  #endif /* OPROF_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/oprofile/oprofile_files.c linux-2.6.18-xen/drivers/oprofile/oprofile_files.c
---- linux-2.6.18.3/drivers/oprofile/oprofile_files.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/oprofile/oprofile_files.c	2006-11-19 14:26:47.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
+--- a/drivers/oprofile/oprofile_files.c	2007-03-15 15:56:07.000000000 +0100
++++ b/drivers/oprofile/oprofile_files.c	2007-03-14 10:55:31.000000000 +0100
 @@ -5,15 +5,21 @@
   * @remark Read the file COPYING
   *
@@ -46241,9 +41023,9 @@
  	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
  	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
  	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/pci/Kconfig linux-2.6.18-xen/drivers/pci/Kconfig
---- linux-2.6.18.3/drivers/pci/Kconfig	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/pci/Kconfig	2006-11-19 14:26:47.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/pci/Kconfig b/drivers/pci/Kconfig
+--- a/drivers/pci/Kconfig	2007-03-16 18:49:24.000000000 +0100
++++ b/drivers/pci/Kconfig	2007-03-14 10:55:32.000000000 +0100
 @@ -5,6 +5,7 @@
  	bool "Message Signaled Interrupts (MSI and MSI-X)"
  	depends on PCI
@@ -46252,9 +41034,18 @@
  	help
  	   This allows device drivers to enable MSI (Message Signaled
  	   Interrupts).  Message Signaled Interrupts enable a device to
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/serial/Kconfig linux-2.6.18-xen/drivers/serial/Kconfig
---- linux-2.6.18.3/drivers/serial/Kconfig	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/drivers/serial/Kconfig	2006-11-19 14:26:51.000000000 +0100
+@@ -55,7 +56,7 @@
+ config HT_IRQ
+ 	bool "Interrupts on hypertransport devices"
+ 	default y
+-	depends on PCI && X86_LOCAL_APIC && X86_IO_APIC
++	depends on PCI && X86_LOCAL_APIC && X86_IO_APIC && !XEN
+ 	help
+ 	   This allows native hypertransport devices to use interrupts.
+ 
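++# Background note: under Xen the hypervisor owns the local APIC and
++# interrupt routing, so native MSI and HyperTransport interrupt
++# delivery is not available to the kernel; both are masked out above.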
+diff -x '.hg*' -x '.git*' -urN a/drivers/serial/Kconfig b/drivers/serial/Kconfig
+--- a/drivers/serial/Kconfig	2007-03-16 18:49:34.000000000 +0100
++++ b/drivers/serial/Kconfig	2007-03-14 10:55:34.000000000 +0100
 @@ -11,6 +11,7 @@
  config SERIAL_8250
  	tristate "8250/16550 and compatible serial support"
@@ -46263,9 +41054,9 @@
  	select SERIAL_CORE
  	---help---
  	  This selects whether you want to include the driver for the standard
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/balloon/balloon.c linux-2.6.18-xen/drivers/xen/balloon/balloon.c
---- linux-2.6.18.3/drivers/xen/balloon/balloon.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/balloon/balloon.c	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/balloon/balloon.c b/drivers/xen/balloon/balloon.c
+--- a/drivers/xen/balloon/balloon.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/balloon/balloon.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,637 @@
 +/******************************************************************************
 + * balloon.c
@@ -46363,8 +41154,8 @@
 +static unsigned long balloon_low, balloon_high;
 +
 +/* Main work function, always executed in process context. */
-+static void balloon_process(void *unused);
-+static DECLARE_WORK(balloon_worker, balloon_process, NULL);
++static void balloon_process(struct work_struct *unusable);
++static DECLARE_WORK(balloon_worker, balloon_process);
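++/* Background note: with the 2.6.20 workqueue API a work handler takes
++ * the work_struct itself; callers that need their own context recover
++ * it via container_of(), as the blkfront changes below illustrate. */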
 +static struct timer_list balloon_timer;
 +
 +/* When ballooning out (allocating memory to return to Xen) we don't really 
@@ -46599,7 +41390,7 @@
 + * by the balloon lock), or with changes to the Xen hard limit, but we will
 + * recover from these in time.
 + */
-+static void balloon_process(void *unused)
++static void balloon_process(struct work_struct *unusable)
 +{
 +	int need_sleep = 0;
 +	long credit;
@@ -46904,15 +41695,15 @@
 +EXPORT_SYMBOL_GPL(balloon_release_driver_page);
 +
 +MODULE_LICENSE("Dual BSD/GPL");
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/balloon/Makefile linux-2.6.18-xen/drivers/xen/balloon/Makefile
---- linux-2.6.18.3/drivers/xen/balloon/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/balloon/Makefile	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/balloon/Makefile b/drivers/xen/balloon/Makefile
+--- a/drivers/xen/balloon/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/balloon/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,2 @@
 +
 +obj-y += balloon.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blkback/blkback.c linux-2.6.18-xen/drivers/xen/blkback/blkback.c
---- linux-2.6.18.3/drivers/xen/blkback/blkback.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blkback/blkback.c	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blkback/blkback.c b/drivers/xen/blkback/blkback.c
+--- a/drivers/xen/blkback/blkback.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blkback/blkback.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,549 @@
 +/******************************************************************************
 + * arch/xen/drivers/blkif/backend/main.c
@@ -47195,7 +41986,7 @@
 +	wake_up(&blkif->wq);
 +}
 +
-+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++irqreturn_t blkif_be_int(int irq, void *dev_id)
 +{
 +	blkif_notify_work(dev_id);
 +	return IRQ_HANDLED;
@@ -47463,9 +42254,9 @@
 +module_init(blkif_init);
 +
 +MODULE_LICENSE("Dual BSD/GPL");
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blkback/common.h linux-2.6.18-xen/drivers/xen/blkback/common.h
---- linux-2.6.18.3/drivers/xen/blkback/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blkback/common.h	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blkback/common.h b/drivers/xen/blkback/common.h
+--- a/drivers/xen/blkback/common.h	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blkback/common.h	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,133 @@
 +/* 
 + * This program is free software; you can redistribute it and/or
@@ -47596,13 +42387,13 @@
 +
 +void blkif_xenbus_init(void);
 +
-+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++irqreturn_t blkif_be_int(int irq, void *dev_id);
 +int blkif_schedule(void *arg);
 +
 +#endif /* __BLKIF__BACKEND__COMMON_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blkback/interface.c linux-2.6.18-xen/drivers/xen/blkback/interface.c
---- linux-2.6.18.3/drivers/xen/blkback/interface.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blkback/interface.c	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blkback/interface.c b/drivers/xen/blkback/interface.c
+--- a/drivers/xen/blkback/interface.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blkback/interface.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,171 @@
 +/******************************************************************************
 + * arch/xen/drivers/blkif/backend/interface.c
@@ -47640,7 +42431,7 @@
 +#include <xen/evtchn.h>
 +#include <linux/kthread.h>
 +
-+static kmem_cache_t *blkif_cachep;
++static struct kmem_cache *blkif_cachep;
 +
 +blkif_t *blkif_alloc(domid_t domid)
 +{
@@ -47775,16 +42566,16 @@
 +	blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
 +					 0, 0, NULL, NULL);
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blkback/Makefile linux-2.6.18-xen/drivers/xen/blkback/Makefile
---- linux-2.6.18.3/drivers/xen/blkback/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blkback/Makefile	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blkback/Makefile b/drivers/xen/blkback/Makefile
+--- a/drivers/xen/blkback/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blkback/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,3 @@
 +obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o
 +
 +blkbk-y	:= blkback.o xenbus.o interface.o vbd.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blkback/vbd.c linux-2.6.18-xen/drivers/xen/blkback/vbd.c
---- linux-2.6.18.3/drivers/xen/blkback/vbd.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blkback/vbd.c	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blkback/vbd.c b/drivers/xen/blkback/vbd.c
+--- a/drivers/xen/blkback/vbd.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blkback/vbd.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,119 @@
 +/******************************************************************************
 + * blkback/vbd.c
@@ -47905,9 +42696,9 @@
 + out:
 +	return rc;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blkback/xenbus.c linux-2.6.18-xen/drivers/xen/blkback/xenbus.c
---- linux-2.6.18.3/drivers/xen/blkback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blkback/xenbus.c	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blkback/xenbus.c b/drivers/xen/blkback/xenbus.c
+--- a/drivers/xen/blkback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blkback/xenbus.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,468 @@
 +/*  Xenbus code for blkif backend
 +    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
@@ -48377,10 +43168,10 @@
 +{
 +	xenbus_register_backend(&blkback);
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blkfront/blkfront.c linux-2.6.18-xen/drivers/xen/blkfront/blkfront.c
---- linux-2.6.18.3/drivers/xen/blkfront/blkfront.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blkfront/blkfront.c	2006-12-05 18:42:37.000000000 +0100
-@@ -0,0 +1,846 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blkfront/blkfront.c b/drivers/xen/blkfront/blkfront.c
+--- a/drivers/xen/blkfront/blkfront.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blkfront/blkfront.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,847 @@
 +/******************************************************************************
 + * blkfront.c
 + * 
@@ -48447,8 +43238,8 @@
 +
 +static void kick_pending_request_queues(struct blkfront_info *);
 +
-+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
-+static void blkif_restart_queue(void *arg);
++static irqreturn_t blkif_int(int irq, void *dev_id);
++static void blkif_restart_queue(struct work_struct *work);
 +static void blkif_recover(struct blkfront_info *);
 +static void blkif_completion(struct blk_shadow *);
 +static void blkif_free(struct blkfront_info *, int);
@@ -48483,7 +43274,7 @@
 +	info->xbdev = dev;
 +	info->vdevice = vdevice;
 +	info->connected = BLKIF_STATE_DISCONNECTED;
-+	INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
++	INIT_WORK(&info->work, blkif_restart_queue);
 +
 +	for (i = 0; i < BLK_RING_SIZE; i++)
 +		info->shadow[i].req.id = i+1;
@@ -48801,9 +43592,10 @@
 +	}
 +}
 +
-+static void blkif_restart_queue(void *arg)
++static void blkif_restart_queue(struct work_struct *work)
 +{
-+	struct blkfront_info *info = (struct blkfront_info *)arg;
++	struct blkfront_info *info = container_of(work, struct blkfront_info, work);
++
 +	spin_lock_irq(&blkif_io_lock);
 +	if (info->connected == BLKIF_STATE_CONNECTED)
 +		kick_pending_request_queues(info);
@@ -48997,9 +43789,9 @@
 +		if (RING_FULL(&info->ring))
 +			goto wait;
 +
-+		DPRINTK("do_blk_req %p: cmd %p, sec %lx, "
++		DPRINTK("do_blk_req %p: cmd %p, sec %llx, "
 +			"(%u/%li) buffer:%p [%s]\n",
-+			req, req->cmd, req->sector, req->current_nr_sectors,
++			req, req->cmd, (u64)req->sector, req->current_nr_sectors,
 +			req->nr_sectors, req->buffer,
 +			rq_data_dir(req) ? "write" : "read");
 +
@@ -49021,7 +43813,7 @@
 +}
 +
 +
-+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++static irqreturn_t blkif_int(int irq, void *dev_id)
 +{
 +	struct request *req;
 +	blkif_response_t *bret;
@@ -49227,9 +44019,9 @@
 +module_exit(xlblk_exit);
 +
 +MODULE_LICENSE("Dual BSD/GPL");
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blkfront/block.h linux-2.6.18-xen/drivers/xen/blkfront/block.h
---- linux-2.6.18.3/drivers/xen/blkfront/block.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blkfront/block.h	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blkfront/block.h b/drivers/xen/blkfront/block.h
+--- a/drivers/xen/blkfront/block.h	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blkfront/block.h	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,154 @@
 +/******************************************************************************
 + * block.h
@@ -49385,9 +44177,9 @@
 +void xlvbd_del(struct blkfront_info *info);
 +
 +#endif /* __XEN_DRIVERS_BLOCK_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blkfront/Kconfig linux-2.6.18-xen/drivers/xen/blkfront/Kconfig
---- linux-2.6.18.3/drivers/xen/blkfront/Kconfig	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blkfront/Kconfig	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blkfront/Kconfig b/drivers/xen/blkfront/Kconfig
+--- a/drivers/xen/blkfront/Kconfig	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blkfront/Kconfig	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,6 @@
 +
 +config XENBLOCK
@@ -49395,18 +44187,18 @@
 +	depends on ARCH_XEN
 +	help
 +	  Block device driver for Xen
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blkfront/Makefile linux-2.6.18-xen/drivers/xen/blkfront/Makefile
---- linux-2.6.18.3/drivers/xen/blkfront/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blkfront/Makefile	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blkfront/Makefile b/drivers/xen/blkfront/Makefile
+--- a/drivers/xen/blkfront/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blkfront/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,5 @@
 +
 +obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	:= xenblk.o
 +
 +xenblk-objs := blkfront.o vbd.o
 +
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blkfront/vbd.c linux-2.6.18-xen/drivers/xen/blkfront/vbd.c
---- linux-2.6.18.3/drivers/xen/blkfront/vbd.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blkfront/vbd.c	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blkfront/vbd.c b/drivers/xen/blkfront/vbd.c
+--- a/drivers/xen/blkfront/vbd.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blkfront/vbd.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,318 @@
 +/******************************************************************************
 + * vbd.c
@@ -49726,10 +44518,10 @@
 +	blk_cleanup_queue(info->rq);
 +	info->rq = NULL;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blktap/blktapmain.c linux-2.6.18-xen/drivers/xen/blktap/blktapmain.c
---- linux-2.6.18.3/drivers/xen/blktap/blktapmain.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blktap/blktapmain.c	2006-12-05 18:42:37.000000000 +0100
-@@ -0,0 +1,1393 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blktap/blktapmain.c b/drivers/xen/blktap/blktapmain.c
+--- a/drivers/xen/blktap/blktapmain.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blktap/blktapmain.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,1389 @@
 +/******************************************************************************
 + * drivers/xen/blktap/blktap.c
 + * 
@@ -49780,6 +44572,7 @@
 +#include <linux/major.h>
 +#include <linux/gfp.h>
 +#include <linux/poll.h>
++#include <linux/init.h>
 +#include <asm/tlbflush.h>
 +
 +#define MAX_TAP_DEV 100     /*the maximum number of tapdisk ring devices    */
@@ -49829,6 +44622,8 @@
 +         ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +        \
 +         ((_seg) * PAGE_SIZE))
 +static int blkif_reqs = MAX_PENDING_REQS;
++module_param(blkif_reqs, int, 0);
++
 +static int mmap_pages = MMAP_PAGES;
 +
 +#define RING_PAGES 1 /* BLKTAP - immediately before the mmap area, we
@@ -49866,13 +44661,6 @@
 +static domid_translate_t  translate_domid[MAX_TAP_DEV];
 +static tap_blkif_t *tapfds[MAX_TAP_DEV];
 +
-+static int __init set_blkif_reqs(char *str)
-+{
-+	get_option(&str, &blkif_reqs);
-+	return 1;
-+}
-+__setup("blkif_reqs=", set_blkif_reqs);
-+
 +/* Run-time switchable: /sys/module/blktap/parameters/ */
 +static unsigned int log_stats = 0;
 +static unsigned int debug_lvl = 0;
@@ -50746,7 +45534,7 @@
 +	wake_up(&blkif->wq);
 +}
 +
-+irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++irqreturn_t tap_blkif_be_int(int irq, void *dev_id)
 +{
 +	blkif_notify_work(dev_id);
 +	return IRQ_HANDLED;
@@ -51123,9 +45911,9 @@
 +module_init(blkif_init);
 +
 +MODULE_LICENSE("Dual BSD/GPL");
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blktap/common.h linux-2.6.18-xen/drivers/xen/blktap/common.h
---- linux-2.6.18.3/drivers/xen/blktap/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blktap/common.h	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blktap/common.h b/drivers/xen/blktap/common.h
+--- a/drivers/xen/blktap/common.h	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blktap/common.h	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,120 @@
 +/* 
 + * This program is free software; you can redistribute it and/or
@@ -51240,16 +46028,16 @@
 +
 +void tap_blkif_xenbus_init(void);
 +
-+irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++irqreturn_t tap_blkif_be_int(int irq, void *dev_id);
 +int tap_blkif_schedule(void *arg);
 +
 +int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif);
 +void signal_tapdisk(int idx);
 +
 +#endif /* __BLKIF__BACKEND__COMMON_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blktap/interface.c linux-2.6.18-xen/drivers/xen/blktap/interface.c
---- linux-2.6.18.3/drivers/xen/blktap/interface.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blktap/interface.c	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blktap/interface.c b/drivers/xen/blktap/interface.c
+--- a/drivers/xen/blktap/interface.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blktap/interface.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,164 @@
 +/******************************************************************************
 + * drivers/xen/blktap/interface.c
@@ -51287,7 +46075,7 @@
 +#include "common.h"
 +#include <xen/evtchn.h>
 +
-+static kmem_cache_t *blkif_cachep;
++static struct kmem_cache *blkif_cachep;
 +
 +blkif_t *tap_alloc_blkif(domid_t domid)
 +{
@@ -51415,9 +46203,9 @@
 +	blkif_cachep = kmem_cache_create("blktapif_cache", sizeof(blkif_t), 
 +					 0, 0, NULL, NULL);
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blktap/Makefile linux-2.6.18-xen/drivers/xen/blktap/Makefile
---- linux-2.6.18.3/drivers/xen/blktap/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blktap/Makefile	2006-12-05 18:42:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blktap/Makefile b/drivers/xen/blktap/Makefile
+--- a/drivers/xen/blktap/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blktap/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,6 @@
 +LINUXINCLUDE += -I../xen/include/public/io
 +
@@ -51425,9 +46213,9 @@
 +
 +blktap-y	:= xenbus.o interface.o blktapmain.o 
 +
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/blktap/xenbus.c linux-2.6.18-xen/drivers/xen/blktap/xenbus.c
---- linux-2.6.18.3/drivers/xen/blktap/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/blktap/xenbus.c	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/blktap/xenbus.c b/drivers/xen/blktap/xenbus.c
+--- a/drivers/xen/blktap/xenbus.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/blktap/xenbus.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,366 @@
 +/* drivers/xen/blktap/xenbus.c
 + *
@@ -51795,15 +46583,15 @@
 +{
 +	xenbus_register_backend(&blktap);
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/char/Makefile linux-2.6.18-xen/drivers/xen/char/Makefile
---- linux-2.6.18.3/drivers/xen/char/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/char/Makefile	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/char/Makefile b/drivers/xen/char/Makefile
+--- a/drivers/xen/char/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/char/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,2 @@
 +
 +obj-y	:= mem.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/char/mem.c linux-2.6.18-xen/drivers/xen/char/mem.c
---- linux-2.6.18.3/drivers/xen/char/mem.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/char/mem.c	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/char/mem.c b/drivers/xen/char/mem.c
+--- a/drivers/xen/char/mem.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/char/mem.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,180 @@
 +/*
 + *  Originally from linux/drivers/char/mem.c
@@ -51985,10 +46773,10 @@
 +	.mmap		= mmap_mem,
 +	.open		= open_mem,
 +};
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/console/console.c linux-2.6.18-xen/drivers/xen/console/console.c
---- linux-2.6.18.3/drivers/xen/console/console.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/console/console.c	2006-11-19 14:26:56.000000000 +0100
-@@ -0,0 +1,688 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/console/console.c b/drivers/xen/console/console.c
+--- a/drivers/xen/console/console.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/console/console.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,714 @@
 +/******************************************************************************
 + * console.c
 + * 
@@ -52048,6 +46836,7 @@
 +#include <xen/interface/event_channel.h>
 +#include <asm/hypervisor.h>
 +#include <xen/evtchn.h>
++#include <xen/xenbus.h>
 +#include <xen/xencons.h>
 +
 +/*
@@ -52055,17 +46844,23 @@
 + *  'xencons=off'  [XC_OFF]:     Console is disabled.
 + *  'xencons=tty'  [XC_TTY]:     Console attached to '/dev/tty[0-9]+'.
 + *  'xencons=ttyS' [XC_SERIAL]:  Console attached to '/dev/ttyS[0-9]+'.
++ *  'xencons=xvc'  [XC_XVC]:     Console attached to '/dev/xvc0'.
 + *                 [XC_DEFAULT]: DOM0 -> XC_SERIAL ; all others -> XC_TTY.
 + * 
 + * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
 + * warnings from standard distro startup scripts.
 + */
-+static enum { XC_OFF, XC_DEFAULT, XC_TTY, XC_SERIAL } xc_mode = XC_DEFAULT;
++static enum {
++	XC_OFF, XC_DEFAULT, XC_TTY, XC_SERIAL, XC_XVC
++} xc_mode = XC_DEFAULT;
 +static int xc_num = -1;
 +
++/* /dev/xvc0 device number allocated by lanana.org. */
++#define XEN_XVC_MAJOR 204
++#define XEN_XVC_MINOR 191
++
 +#ifdef CONFIG_MAGIC_SYSRQ
 +static unsigned long sysrq_requested;
-+extern int sysrq_enabled;
 +#endif
 +
 +static int __init xencons_setup(char *str)
@@ -52073,28 +46868,24 @@
 +	char *q;
 +	int n;
 +
-+	if (!strncmp(str, "ttyS", 4))
++	if (!strncmp(str, "ttyS", 4)) {
 +		xc_mode = XC_SERIAL;
-+	else if (!strncmp(str, "tty", 3))
++		str += 4;
++	} else if (!strncmp(str, "tty", 3)) {
 +		xc_mode = XC_TTY;
-+	else if (!strncmp(str, "off", 3))
-+		xc_mode = XC_OFF;
-+
-+	switch (xc_mode) {
-+	case XC_SERIAL:
-+		n = simple_strtol(str+4, &q, 10);
-+		if (q > (str + 4))
-+			xc_num = n;
-+		break;
-+	case XC_TTY:
-+		n = simple_strtol(str+3, &q, 10);
-+		if (q > (str + 3))
-+			xc_num = n;
-+		break;
-+	default:
-+		break;
++		str += 3;
++	} else if (!strncmp(str, "xvc", 3)) {
++		xc_mode = XC_XVC;
++		str += 3;
++	} else if (!strncmp(str, "off", 3)) {
++		xc_mode = XC_OFF;
++		str += 3;
 +	}
 +
++	n = simple_strtol(str, &q, 10);
++	if (q != str)
++		xc_num = n;
++
 +	return 1;
 +}
 +__setup("xencons=", xencons_setup);
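 +/* Illustrative examples (not exhaustive): "xencons=xvc" attaches the
 + * console to /dev/xvc0, "xencons=ttyS1" to /dev/ttyS1, and
 + * "xencons=off" disables it entirely. */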
@@ -52187,11 +46978,17 @@
 +		if (!xen_start_info->console.domU.evtchn)
 +			goto out;
 +		if (xc_mode == XC_DEFAULT)
-+			xc_mode = XC_TTY;
++			xc_mode = XC_XVC;
 +		kcons_info.write = kcons_write;
 +	}
 +
 +	switch (xc_mode) {
++	case XC_XVC:
++		strcpy(kcons_info.name, "xvc");
++		if (xc_num == -1)
++			xc_num = 0;
++		break;
++
 +	case XC_SERIAL:
 +		strcpy(kcons_info.name, "ttyS");
 +		if (xc_num == -1)
@@ -52296,16 +47093,16 @@
 +/******************** User-space console driver (/dev/console) ************/
 +
 +#define DRV(_d)         (_d)
-+#define DUMMY_TTY(_tty) ((xc_mode != XC_SERIAL) &&		\
++#define DUMMY_TTY(_tty) ((xc_mode == XC_TTY) &&		\
 +			 ((_tty)->index != (xc_num - 1)))
 +
-+static struct termios *xencons_termios[MAX_NR_CONSOLES];
-+static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
++static struct ktermios *xencons_termios[MAX_NR_CONSOLES];
++static struct ktermios *xencons_termios_locked[MAX_NR_CONSOLES];
 +static struct tty_struct *xencons_tty;
 +static int xencons_priv_irq;
 +static char x_char;
 +
-+void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
++void xencons_rx(char *buf, unsigned len)
 +{
 +	int           i;
 +	unsigned long flags;
@@ -52316,7 +47113,7 @@
 +
 +	for (i = 0; i < len; i++) {
 +#ifdef CONFIG_MAGIC_SYSRQ
-+		if (sysrq_enabled) {
++		if (sysrq_on()) {
 +			if (buf[i] == '\x0f') { /* ^O */
 +				sysrq_requested = jiffies;
 +				continue; /* don't print the sysrq key */
@@ -52328,7 +47125,7 @@
 +					spin_unlock_irqrestore(
 +						&xencons_lock, flags);
 +					handle_sysrq(
-+						buf[i], regs, xencons_tty);
++						buf[i], xencons_tty);
 +					spin_lock_irqsave(
 +						&xencons_lock, flags);
 +					continue;
@@ -52393,14 +47190,13 @@
 +}
 +
 +/* Privileged receive callback and transmit kicker. */
-+static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
-+					  struct pt_regs *regs)
++static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id)
 +{
 +	static char rbuf[16];
 +	int         l;
 +
 +	while ((l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0)
-+		xencons_rx(rbuf, l, regs);
++		xencons_rx(rbuf, l);
 +
 +	xencons_tx();
 +
@@ -52619,8 +47415,8 @@
 +			return rc;
 +	}
 +
-+	xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ?
-+					  1 : MAX_NR_CONSOLES);
++	xencons_driver = alloc_tty_driver((xc_mode == XC_TTY) ?
++					  MAX_NR_CONSOLES : 1);
 +	if (xencons_driver == NULL)
 +		return -ENOMEM;
 +
@@ -52635,14 +47431,23 @@
 +	DRV(xencons_driver)->termios         = xencons_termios;
 +	DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
 +
-+	if (xc_mode == XC_SERIAL) {
++	switch (xc_mode) {
++	case XC_XVC:
++		DRV(xencons_driver)->name        = "xvc";
++		DRV(xencons_driver)->major       = XEN_XVC_MAJOR;
++		DRV(xencons_driver)->minor_start = XEN_XVC_MINOR;
++		DRV(xencons_driver)->name_base   = xc_num;
++		break;
++	case XC_SERIAL:
 +		DRV(xencons_driver)->name        = "ttyS";
 +		DRV(xencons_driver)->minor_start = 64 + xc_num;
-+		DRV(xencons_driver)->name_base   = 0 + xc_num;
-+	} else {
++		DRV(xencons_driver)->name_base   = xc_num;
++		break;
++	default:
 +		DRV(xencons_driver)->name        = "tty";
 +		DRV(xencons_driver)->minor_start = 1;
 +		DRV(xencons_driver)->name_base   = 1;
++		break;
 +	}
 +
 +	tty_set_operations(xencons_driver, &xencons_ops);
@@ -52671,21 +47476,30 @@
 +	printk("Xen virtual console successfully installed as %s%d\n",
 +	       DRV(xencons_driver)->name, xc_num);
 +
 +        /* If no framebuffer is present, make the Xen console the preferred console */
++        if (!is_initial_xendomain() &&
++	    !xenbus_exists(XBT_NIL, "device", "vfb")) {
++		/* FIXME: this is ugly */
++		unregister_console(&kcons_info);
++		kcons_info.flags |= CON_CONSDEV;
++		register_console(&kcons_info);
++	}
++
 +	return 0;
 +}
 +
 +module_init(xencons_init);
 +
 +MODULE_LICENSE("Dual BSD/GPL");
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/console/Makefile linux-2.6.18-xen/drivers/xen/console/Makefile
---- linux-2.6.18.3/drivers/xen/console/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/console/Makefile	2006-11-19 14:26:55.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/console/Makefile b/drivers/xen/console/Makefile
+--- a/drivers/xen/console/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/console/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,2 @@
 +
 +obj-y	:= console.o xencons_ring.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/console/xencons_ring.c linux-2.6.18-xen/drivers/xen/console/xencons_ring.c
---- linux-2.6.18.3/drivers/xen/console/xencons_ring.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/console/xencons_ring.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/console/xencons_ring.c b/drivers/xen/console/xencons_ring.c
+--- a/drivers/xen/console/xencons_ring.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/console/xencons_ring.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,143 @@
 +/* 
 + * This program is free software; you can redistribute it and/or
@@ -52772,7 +47586,7 @@
 +	return sent;
 +}
 +
-+static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
++static irqreturn_t handle_input(int irq, void *unused)
 +{
 +	struct xencons_interface *intf = xencons_interface();
 +	XENCONS_RING_IDX cons, prod;
@@ -52783,7 +47597,7 @@
 +	BUG_ON((prod - cons) > sizeof(intf->in));
 +
 +	while (cons != prod) {
-+		xencons_rx(intf->in+MASK_XENCONS_IDX(cons,intf->in), 1, regs);
++		xencons_rx(intf->in+MASK_XENCONS_IDX(cons,intf->in), 1);
 +		cons++;
 +	}
 +
@@ -52830,9 +47644,9 @@
 +{
 +	(void)xencons_ring_init();
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/core/cpu_hotplug.c linux-2.6.18-xen/drivers/xen/core/cpu_hotplug.c
---- linux-2.6.18.3/drivers/xen/core/cpu_hotplug.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/core/cpu_hotplug.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/core/cpu_hotplug.c b/drivers/xen/core/cpu_hotplug.c
+--- a/drivers/xen/core/cpu_hotplug.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/core/cpu_hotplug.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,188 @@
 +
 +#include <linux/init.h>
@@ -53022,10 +47836,10 @@
 +{
 +	xenbus_allowed_cpumask = cpu_present_map;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/core/evtchn.c linux-2.6.18-xen/drivers/xen/core/evtchn.c
---- linux-2.6.18.3/drivers/xen/core/evtchn.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/core/evtchn.c	2006-11-19 14:26:56.000000000 +0100
-@@ -0,0 +1,868 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/core/evtchn.c b/drivers/xen/core/evtchn.c
+--- a/drivers/xen/core/evtchn.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/core/evtchn.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,840 @@
 +/******************************************************************************
 + * evtchn.c
 + * 
@@ -53401,7 +48215,7 @@
 +
 +int bind_evtchn_to_irqhandler(
 +	unsigned int evtchn,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	irq_handler_t handler,
 +	unsigned long irqflags,
 +	const char *devname,
 +	void *dev_id)
@@ -53423,7 +48237,7 @@
 +int bind_virq_to_irqhandler(
 +	unsigned int virq,
 +	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	irq_handler_t handler,
 +	unsigned long irqflags,
 +	const char *devname,
 +	void *dev_id)
@@ -53445,7 +48259,7 @@
 +int bind_ipi_to_irqhandler(
 +	unsigned int ipi,
 +	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	irq_handler_t handler,
 +	unsigned long irqflags,
 +	const char *devname,
 +	void *dev_id)
@@ -53500,13 +48314,12 @@
 +	rebind_irq_to_cpu(irq, tcpu);
 +}
 +
-+static int retrigger(unsigned int irq)
++static int retrigger_vector(unsigned int irq)
 +{
 +	int evtchn = evtchn_from_irq(irq);
 +	shared_info_t *s = HYPERVISOR_shared_info;
 +	if (!VALID_EVTCHN(evtchn))
 +		return 1;
-+	BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
 +	synch_set_bit(evtchn, &s->evtchn_pending[0]);
 +	return 1;
 +}
@@ -53515,7 +48328,7 @@
 + * Interface to generic handling in irq.c
 + */
 +
-+static unsigned int startup_dynirq(unsigned int irq)
++static unsigned int startup_dynirq_vector(unsigned int irq)
 +{
 +	int evtchn = evtchn_from_irq(irq);
 +
@@ -53524,15 +48337,7 @@
 +	return 0;
 +}
 +
-+static void shutdown_dynirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn))
-+		mask_evtchn(evtchn);
-+}
-+
-+static void enable_dynirq(unsigned int irq)
++static void unmask_dynirq_vector(unsigned int irq)
 +{
 +	int evtchn = evtchn_from_irq(irq);
 +
@@ -53540,7 +48345,7 @@
 +		unmask_evtchn(evtchn);
 +}
 +
-+static void disable_dynirq(unsigned int irq)
++static void mask_dynirq_vector(unsigned int irq)
 +{
 +	int evtchn = evtchn_from_irq(irq);
 +
@@ -53548,7 +48353,7 @@
 +		mask_evtchn(evtchn);
 +}
 +
-+static void ack_dynirq(unsigned int irq)
++static void ack_dynirq_vector(unsigned int irq)
 +{
 +	int evtchn = evtchn_from_irq(irq);
 +
@@ -53560,7 +48365,7 @@
 +	}
 +}
 +
-+static void end_dynirq(unsigned int irq)
++static void ack_dynirq_quirk_vector(unsigned int irq)
 +{
 +	int evtchn = evtchn_from_irq(irq);
 +
@@ -53568,18 +48373,17 @@
 +		unmask_evtchn(evtchn);
 +}
 +
-+static struct hw_interrupt_type dynirq_type = {
-+	.typename	= "Dynamic-irq",
-+	.startup	= startup_dynirq,
-+	.shutdown	= shutdown_dynirq,
-+	.enable		= enable_dynirq,
-+	.disable	= disable_dynirq,
-+	.ack		= ack_dynirq,
-+	.end		= end_dynirq,
++static struct irq_chip dynirq_chip = {
++	.name		= "Dynamic-irq",
++	.startup	= startup_dynirq_vector,
++	.mask		= mask_dynirq_vector,
++	.unmask		= unmask_dynirq_vector,
++	.ack		= ack_dynirq_vector,
++	.eoi		= ack_dynirq_quirk_vector,
 +#ifdef CONFIG_SMP
 +	.set_affinity	= set_affinity_irq,
 +#endif
-+	.retrigger	= retrigger,
++	.retrigger	= retrigger_vector,
 +};
 +
 +static inline void pirq_unmask_notify(int pirq)
@@ -53605,7 +48409,7 @@
 + */
 +#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
 +
-+static unsigned int startup_pirq(unsigned int irq)
++static unsigned int startup_pirq_vector(unsigned int irq)
 +{
 +	struct evtchn_bind_pirq bind_pirq;
 +	int evtchn = evtchn_from_irq(irq);
@@ -53637,26 +48441,7 @@
 +	return 0;
 +}
 +
-+static void shutdown_pirq(unsigned int irq)
-+{
-+	struct evtchn_close close;
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (!VALID_EVTCHN(evtchn))
-+		return;
-+
-+	mask_evtchn(evtchn);
-+
-+	close.port = evtchn;
-+	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
-+		BUG();
-+
-+	bind_evtchn_to_cpu(evtchn, 0);
-+	evtchn_to_irq[evtchn] = -1;
-+	irq_info[irq] = IRQ_UNBOUND;
-+}
-+
-+static void enable_pirq(unsigned int irq)
++static void unmask_pirq_vector(unsigned int irq)
 +{
 +	int evtchn = evtchn_from_irq(irq);
 +
@@ -53666,7 +48451,7 @@
 +	}
 +}
 +
-+static void disable_pirq(unsigned int irq)
++static void mask_pirq_vector(unsigned int irq)
 +{
 +	int evtchn = evtchn_from_irq(irq);
 +
@@ -53674,7 +48459,7 @@
 +		mask_evtchn(evtchn);
 +}
 +
-+static void ack_pirq(unsigned int irq)
++static void ack_pirq_vector(unsigned int irq)
 +{
 +	int evtchn = evtchn_from_irq(irq);
 +
@@ -53686,7 +48471,7 @@
 +	}
 +}
 +
-+static void end_pirq(unsigned int irq)
++static void ack_pirq_quirk_vector(unsigned int irq)
 +{
 +	int evtchn = evtchn_from_irq(irq);
 +
@@ -53696,18 +48481,17 @@
 +	}
 +}
 +
-+static struct hw_interrupt_type pirq_type = {
-+	.typename	= "Phys-irq",
-+	.startup	= startup_pirq,
-+	.shutdown	= shutdown_pirq,
-+	.enable		= enable_pirq,
-+	.disable	= disable_pirq,
-+	.ack		= ack_pirq,
-+	.end		= end_pirq,
++static struct  irq_chip pirq_chip = {
++	.name		= "Phys-irq",
++	.startup	= startup_pirq_vector,
++	.mask		= mask_pirq_vector,
++	.unmask		= unmask_pirq_vector,
++	.ack		= ack_pirq_vector,
++	.eoi		= ack_pirq_quirk_vector,
 +#ifdef CONFIG_SMP
 +	.set_affinity	= set_affinity_irq,
 +#endif
-+	.retrigger	= retrigger,
++	.retrigger	= retrigger_vector,
 +};
 +
 +int irq_ignore_unhandled(unsigned int irq)
@@ -53875,7 +48659,8 @@
 +		irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
 +		irq_desc[dynirq_to_irq(i)].action  = NULL;
 +		irq_desc[dynirq_to_irq(i)].depth   = 1;
-+		irq_desc[dynirq_to_irq(i)].chip    = &dynirq_type;
++		set_irq_chip_and_handler_name(dynirq_to_irq(i), &dynirq_chip,
++					      handle_level_irq, "level");
 +	}
 +
 +	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
@@ -53891,12 +48676,13 @@
 +		irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
 +		irq_desc[pirq_to_irq(i)].action  = NULL;
 +		irq_desc[pirq_to_irq(i)].depth   = 1;
-+		irq_desc[pirq_to_irq(i)].chip    = &pirq_type;
++		set_irq_chip_and_handler_name(pirq_to_irq(i), &pirq_chip,
++					      handle_level_irq, "level");
 +	}
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/core/features.c linux-2.6.18-xen/drivers/xen/core/features.c
---- linux-2.6.18.3/drivers/xen/core/features.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/core/features.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/core/features.c b/drivers/xen/core/features.c
+--- a/drivers/xen/core/features.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/core/features.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,30 @@
 +/******************************************************************************
 + * features.c
@@ -53928,9 +48714,9 @@
 +			xen_features[i*32+j] = !!(fi.submap & 1<<j);
 +	}
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/core/gnttab.c linux-2.6.18-xen/drivers/xen/core/gnttab.c
---- linux-2.6.18.3/drivers/xen/core/gnttab.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/core/gnttab.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/core/gnttab.c b/drivers/xen/core/gnttab.c
+--- a/drivers/xen/core/gnttab.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/core/gnttab.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,483 @@
 +/******************************************************************************
 + * gnttab.c
@@ -54415,9 +49201,9 @@
 +#ifdef CONFIG_XEN
 +core_initcall(gnttab_init);
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/core/hypervisor_sysfs.c linux-2.6.18-xen/drivers/xen/core/hypervisor_sysfs.c
---- linux-2.6.18.3/drivers/xen/core/hypervisor_sysfs.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/core/hypervisor_sysfs.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/core/hypervisor_sysfs.c b/drivers/xen/core/hypervisor_sysfs.c
+--- a/drivers/xen/core/hypervisor_sysfs.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/core/hypervisor_sysfs.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,59 @@
 +/*
 + *  copyright (c) 2006 IBM Corporation
@@ -54478,9 +49264,9 @@
 +
 +device_initcall(hypervisor_subsys_init);
 +EXPORT_SYMBOL_GPL(hypervisor_subsys);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/core/Makefile linux-2.6.18-xen/drivers/xen/core/Makefile
---- linux-2.6.18.3/drivers/xen/core/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/core/Makefile	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/core/Makefile b/drivers/xen/core/Makefile
+--- a/drivers/xen/core/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/core/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,13 @@
 +#
 +# Makefile for the linux kernel.
@@ -54495,10 +49281,10 @@
 +obj-$(CONFIG_XEN_SKBUFF)	+= skbuff.o
 +obj-$(CONFIG_XEN_REBOOT)	+= reboot.o
 +obj-$(CONFIG_XEN_SMPBOOT)	+= smpboot.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/core/reboot.c linux-2.6.18-xen/drivers/xen/core/reboot.c
---- linux-2.6.18.3/drivers/xen/core/reboot.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/core/reboot.c	2006-11-19 14:26:56.000000000 +0100
-@@ -0,0 +1,384 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/core/reboot.c b/drivers/xen/core/reboot.c
+--- a/drivers/xen/core/reboot.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/core/reboot.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,385 @@
 +#define __KERNEL_SYSCALLS__
 +#include <linux/version.h>
 +#include <linux/kernel.h>
@@ -54506,6 +49292,7 @@
 +#include <linux/unistd.h>
 +#include <linux/module.h>
 +#include <linux/reboot.h>
++#include <linux/syscalls.h>
 +#include <linux/sysrq.h>
 +#include <linux/stringify.h>
 +#include <asm/irq.h>
@@ -54579,8 +49366,8 @@
 +
 +/* Ignore multiple shutdown requests. */
 +static int shutting_down = SHUTDOWN_INVALID;
-+static void __shutdown_handler(void *unused);
-+static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
++static void __shutdown_handler(struct work_struct *work);
++static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler);
 +
 +#if defined(__i386__) || defined(__x86_64__)
 +
@@ -54723,7 +49510,7 @@
 +
 +	if ((shutting_down == SHUTDOWN_POWEROFF) ||
 +	    (shutting_down == SHUTDOWN_HALT)) {
-+		if (execve("/sbin/poweroff", poweroff_argv, envp) < 0) {
++		if (kernel_execve("/sbin/poweroff", poweroff_argv, envp) < 0) {
 +			sys_reboot(LINUX_REBOOT_MAGIC1,
 +				   LINUX_REBOOT_MAGIC2,
 +				   LINUX_REBOOT_CMD_POWER_OFF,
@@ -54750,7 +49537,7 @@
 +	return 0;
 +}
 +
-+static void __shutdown_handler(void *unused)
++static void __shutdown_handler(struct work_struct *unused)
 +{
 +	int err;
 +
@@ -54810,7 +49597,7 @@
 +	}
 +
 +	if (shutting_down != SHUTDOWN_INVALID)
-+		schedule_work(&shutdown_work);
++		schedule_delayed_work(&shutdown_work, 0);
 +
 +	kfree(str);
 +}
@@ -54842,7 +49629,7 @@
 +
 +#ifdef CONFIG_MAGIC_SYSRQ
 +	if (sysrq_key != '\0')
-+		handle_sysrq(sysrq_key, NULL, NULL);
++		handle_sysrq(sysrq_key, NULL);
 +#endif
 +}
 +
@@ -54883,9 +49670,9 @@
 +}
 +
 +subsys_initcall(setup_shutdown_event);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/core/skbuff.c linux-2.6.18-xen/drivers/xen/core/skbuff.c
---- linux-2.6.18.3/drivers/xen/core/skbuff.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/core/skbuff.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/core/skbuff.c b/drivers/xen/core/skbuff.c
+--- a/drivers/xen/core/skbuff.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/core/skbuff.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,144 @@
 +
 +#include <linux/module.h>
@@ -54903,7 +49690,7 @@
 +#include <asm/hypervisor.h>
 +
 +/* Referenced in netback.c. */
-+/*static*/ kmem_cache_t *skbuff_cachep;
++/*static*/ struct kmem_cache *skbuff_cachep;
 +EXPORT_SYMBOL(skbuff_cachep);
 +
 +/* Allow up to 64kB or page-sized packets (whichever is greater). */
@@ -54912,18 +49699,18 @@
 +#else
 +#define MAX_SKBUFF_ORDER 0
 +#endif
-+static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];
++static struct kmem_cache *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];
 +
 +static struct {
 +	int size;
-+	kmem_cache_t *cachep;
++	struct kmem_cache *cachep;
 +} skbuff_small[] = { { 512, NULL }, { 2048, NULL } };
 +
 +struct sk_buff *__alloc_skb(unsigned int length, gfp_t gfp_mask,
-+			    int fclone)
++			    int fclone, int node)
 +{
 +	int order, i;
-+	kmem_cache_t *cachep;
++	struct kmem_cache *cachep;
 +
 +	length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);
 +
@@ -54967,7 +49754,7 @@
 +	return skb;
 +}
 +
-+static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
++static void skbuff_ctor(void *buf, struct kmem_cache *cachep, unsigned long unused)
 +{
 +	int order = 0;
 +
@@ -54982,7 +49769,7 @@
 +	scrub_pages(buf, 1 << order);
 +}
 +
-+static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
++static void skbuff_dtor(void *buf, struct kmem_cache *cachep, unsigned long unused)
 +{
 +	int order = 0;
 +
@@ -55031,10 +49818,10 @@
 +core_initcall(skbuff_init);
 +
 +EXPORT_SYMBOL(__dev_alloc_skb);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/core/smpboot.c linux-2.6.18-xen/drivers/xen/core/smpboot.c
---- linux-2.6.18.3/drivers/xen/core/smpboot.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/core/smpboot.c	2006-11-19 14:26:56.000000000 +0100
-@@ -0,0 +1,429 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/core/smpboot.c b/drivers/xen/core/smpboot.c
+--- a/drivers/xen/core/smpboot.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/core/smpboot.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,481 @@
 +/*
 + *	Xen SMP booting functions
 + *
@@ -55057,13 +49844,16 @@
 +#include <asm/desc.h>
 +#include <asm/arch_hooks.h>
 +#include <asm/pgalloc.h>
++#if defined(__i386__)
++#include <asm/pda.h>
++#endif
 +#include <xen/evtchn.h>
 +#include <xen/interface/vcpu.h>
 +#include <xen/cpu_hotplug.h>
 +#include <xen/xenbus.h>
 +
-+extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
-+extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
++extern irqreturn_t smp_reschedule_interrupt(int, void *);
++extern irqreturn_t smp_call_function_interrupt(int, void *);
 +
 +extern void local_setup_timer(unsigned int cpu);
 +extern void local_teardown_timer(unsigned int cpu);
@@ -55112,8 +49902,6 @@
 +#if defined(__i386__)
 +u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
 +EXPORT_SYMBOL(x86_cpu_to_apicid);
-+#elif !defined(CONFIG_X86_IO_APIC)
-+unsigned int maxcpus = NR_CPUS;
 +#endif
 +
 +void __init prefill_possible_map(void)
@@ -55182,9 +49970,24 @@
 +}
 +#endif
 +
++#ifdef __i386__
++static inline void set_kernel_gs(void)
++{
++	/* Set %gs for this CPU's PDA.  Memory clobber is to create a
++	   barrier with respect to any PDA operations, so the compiler
++	   doesn't move any before here. */
++	asm volatile ("mov %0, %%gs" : : "r" (__KERNEL_PDA) : "memory");
++}
++#endif
++
 +void cpu_bringup(void)
 +{
++#ifdef __i386__
++	set_kernel_gs();
++	secondary_cpu_init();
++#else
 +	cpu_init();
++#endif
 +	touch_softlockup_watchdog();
 +	preempt_disable();
 +	local_irq_enable();
@@ -55296,6 +50099,11 @@
 +	}
 +
 +	for_each_possible_cpu (cpu) {
++#ifdef __i386__
++		struct i386_pda *pda;
++		struct desc_struct *gdt;
++#endif
++
 +		if (cpu == 0)
 +			continue;
 +
@@ -55312,6 +50120,22 @@
 +		}
 +		gdt_descr->size = GDT_SIZE;
 +		memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE);
++#ifdef __i386__
++		gdt = (struct desc_struct *)gdt_descr->address;
++		pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu));
++
++		if (unlikely(!pda)) {
++			printk(KERN_CRIT "CPU%d failed to allocate PDA\n",
++			       cpu);
++			continue;
++		}
++		cpu_pda(cpu) = pda;
++		cpu_pda(cpu)->cpu_number = cpu;
++		pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
++				(u32 *)&gdt[GDT_ENTRY_PDA].b,
++				(unsigned long)pda, sizeof(*pda) - 1,
++				0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
++#endif
 +		make_page_readonly(
 +			(void *)gdt_descr->address,
 +			XENFEAT_writable_descriptor_tables);
@@ -55326,8 +50150,8 @@
 +		if (IS_ERR(idle))
 +			panic("failed fork for CPU %d", cpu);
 +
-+#ifdef __x86_64__
 +		cpu_pda(cpu)->pcurrent = idle;
++#ifdef __x86_64__
 +		cpu_pda(cpu)->cpunumber = cpu;
 +		clear_ti_thread_flag(idle->thread_info, TIF_FORK);
 +#endif
@@ -55346,17 +50170,17 @@
 +
 +	init_xenbus_allowed_cpumask();
 +
-+#ifdef CONFIG_X86_IO_APIC
 +	/*
 +	 * Here we can be sure that there is an IO-APIC in the system. Let's
 +	 * go and set it up:
 +	 */
++#ifdef CONFIG_X86_IO_APIC
 +	if (!skip_ioapic_setup && nr_ioapics)
 +		setup_IO_APIC();
 +#endif
 +}
 +
-+void __devinit smp_prepare_boot_cpu(void)
++void __init smp_prepare_boot_cpu(void)
 +{
 +}
 +
@@ -55430,7 +50254,7 @@
 +
 +#endif /* CONFIG_HOTPLUG_CPU */
 +
-+int __devinit __cpu_up(unsigned int cpu)
++int __cpuinit __cpu_up(unsigned int cpu)
 +{
 +	int rc;
 +
@@ -55458,15 +50282,30 @@
 +{
 +}
 +
-+#ifndef CONFIG_X86_LOCAL_APIC
++#ifdef CONFIG_X86_MPPARSE
++/*
++ * If the BIOS enumerates physical processors before logical,
++ * maxcpus=N at enumeration-time can be used to disable HT.
++ */
++static int __init parse_maxcpus(char *arg)
++{
++	extern unsigned int maxcpus;
++
++	maxcpus = simple_strtoul(arg, NULL, 0);
++	return 0;
++}
++early_param("maxcpus", parse_maxcpus);
++#endif
++
++#if defined(CONFIG_XEN_UNPRIVILEGED_GUEST) && defined(CONFIG_X86_32)
 +int setup_profiling_timer(unsigned int multiplier)
 +{
 +	return -EINVAL;
 +}
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/core/xen_proc.c linux-2.6.18-xen/drivers/xen/core/xen_proc.c
---- linux-2.6.18.3/drivers/xen/core/xen_proc.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/core/xen_proc.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/core/xen_proc.c b/drivers/xen/core/xen_proc.c
+--- a/drivers/xen/core/xen_proc.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/core/xen_proc.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,18 @@
 +
 +#include <linux/proc_fs.h>
@@ -55486,9 +50325,9 @@
 +{
 +	remove_proc_entry(name, xen_base);
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/core/xen_sysfs.c linux-2.6.18-xen/drivers/xen/core/xen_sysfs.c
---- linux-2.6.18.3/drivers/xen/core/xen_sysfs.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/core/xen_sysfs.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/core/xen_sysfs.c b/drivers/xen/core/xen_sysfs.c
+--- a/drivers/xen/core/xen_sysfs.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/core/xen_sysfs.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,378 @@
 +/*
 + *  copyright (c) 2006 IBM Corporation
@@ -55868,9 +50707,9 @@
 +
 +module_init(hyper_sysfs_init);
 +module_exit(hyper_sysfs_exit);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/evtchn/evtchn.c linux-2.6.18-xen/drivers/xen/evtchn/evtchn.c
---- linux-2.6.18.3/drivers/xen/evtchn/evtchn.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/evtchn/evtchn.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/evtchn/evtchn.c b/drivers/xen/evtchn/evtchn.c
+--- a/drivers/xen/evtchn/evtchn.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/evtchn/evtchn.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,456 @@
 +/******************************************************************************
 + * evtchn.c
@@ -56260,84 +51099,1085 @@
 +
 +	free_page((unsigned long)u->ring);
 +
-+	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-+		int ret;
-+		if (port_user[i] != u)
-+			continue;
++	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
++		int ret;
++		if (port_user[i] != u)
++			continue;
++
++		port_user[i] = NULL;
++		mask_evtchn(i);
++
++		close.port = i;
++		ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
++		BUG_ON(ret);
++	}
++
++	spin_unlock_irq(&port_user_lock);
++
++	kfree(u);
++
++	return 0;
++}
++
++static struct file_operations evtchn_fops = {
++	.owner   = THIS_MODULE,
++	.read    = evtchn_read,
++	.write   = evtchn_write,
++	.ioctl   = evtchn_ioctl,
++	.poll    = evtchn_poll,
++	.fasync  = evtchn_fasync,
++	.open    = evtchn_open,
++	.release = evtchn_release,
++};
++
++static struct miscdevice evtchn_miscdev = {
++	.minor        = MISC_DYNAMIC_MINOR,
++	.name         = "evtchn",
++	.fops         = &evtchn_fops,
++};
++
++static int __init evtchn_init(void)
++{
++	int err;
++
++	if (!is_running_on_xen())
++		return -ENODEV;
++
++	spin_lock_init(&port_user_lock);
++	memset(port_user, 0, sizeof(port_user));
++
++	/* Create '/dev/misc/evtchn'. */
++	err = misc_register(&evtchn_miscdev);
++	if (err != 0) {
++		printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
++		return err;
++	}
++
++	printk("Event-channel device installed.\n");
++
++	return 0;
++}
++
++static void evtchn_cleanup(void)
++{
++	misc_deregister(&evtchn_miscdev);
++}
++
++module_init(evtchn_init);
++module_exit(evtchn_cleanup);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/evtchn/Makefile b/drivers/xen/evtchn/Makefile
+--- a/drivers/xen/evtchn/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/evtchn/Makefile	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,2 @@
++
++obj-y	:= evtchn.o
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/fbfront/Makefile b/drivers/xen/fbfront/Makefile
+--- a/drivers/xen/fbfront/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/fbfront/Makefile	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,2 @@
++obj-$(CONFIG_XEN_FRAMEBUFFER)	:= xenfb.o
++obj-$(CONFIG_XEN_KEYBOARD)	+= xenkbd.o
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/fbfront/xenfb.c b/drivers/xen/fbfront/xenfb.c
+--- a/drivers/xen/fbfront/xenfb.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/fbfront/xenfb.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,687 @@
++/*
++ * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
++ *
++ * Copyright (C) 2005-2006 Anthony Liguori <aliguori at us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru at redhat.com>
++ *
++ *  Based on linux/drivers/video/q40fb.c
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License. See the file COPYING in the main directory of this archive for
++ *  more details.
++ */
++
++/*
++ * TODO:
++ *
++ * Switch to grant tables when they become capable of dealing with the
++ * frame buffer.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/fb.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/mm.h>
++#include <linux/freezer.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/fbif.h>
++#include <xen/xenbus.h>
++#include <linux/kthread.h>
++
++struct xenfb_mapping
++{
++	struct list_head	link;
++	struct vm_area_struct	*vma;
++	atomic_t		map_refs;
++	int			faults;
++	struct xenfb_info	*info;
++};
++
++struct xenfb_info
++{
++	struct task_struct	*kthread;
++	wait_queue_head_t	wq;
++
++	unsigned char		*fb;
++	struct fb_info		*fb_info;
++	struct timer_list	refresh;
++	int			dirty;
++	int			x1, y1, x2, y2;	/* dirty rectangle,
++						   protected by dirty_lock */
++	spinlock_t		dirty_lock;
++	struct mutex		mm_lock;
++	int			nr_pages;
++	struct page		**pages;
++	struct list_head	mappings; /* protected by mm_lock */
++
++	unsigned		evtchn;
++	int			irq;
++	struct xenfb_page	*page;
++	unsigned long 		*mfns;
++	int			update_wanted; /* XENFB_TYPE_UPDATE wanted */
++
++	struct xenbus_device	*xbdev;
++};
++
++static int xenfb_fps = 20;
++static unsigned long xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;
++
++static int xenfb_remove(struct xenbus_device *);
++static void xenfb_init_shared_page(struct xenfb_info *);
++static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
++static void xenfb_disconnect_backend(struct xenfb_info *);
++
++static void xenfb_do_update(struct xenfb_info *info,
++			    int x, int y, int w, int h)
++{
++	union xenfb_out_event event;
++	__u32 prod;
++
++	event.type = XENFB_TYPE_UPDATE;
++	event.update.x = x;
++	event.update.y = y;
++	event.update.width = w;
++	event.update.height = h;
++
++	prod = info->page->out_prod;
++	/* caller ensures !xenfb_queue_full() */
++	mb();			/* ensure ring space available */
++	XENFB_OUT_RING_REF(info->page, prod) = event;
++	wmb();			/* ensure ring contents visible */
++	info->page->out_prod = prod + 1;
++
++	notify_remote_via_evtchn(info->evtchn);
++}
++
++static int xenfb_queue_full(struct xenfb_info *info)
++{
++	__u32 cons, prod;
++
++	prod = info->page->out_prod;
++	cons = info->page->out_cons;
++	return prod - cons == XENFB_OUT_RING_LEN;
++}
++
++static void xenfb_update_screen(struct xenfb_info *info)
++{
++	unsigned long flags;
++	int y1, y2, x1, x2;
++	struct xenfb_mapping *map;
++
++	if (!info->update_wanted)
++		return;
++	if (xenfb_queue_full(info))
++		return;
++
++	mutex_lock(&info->mm_lock);
++
++	spin_lock_irqsave(&info->dirty_lock, flags);
++	y1 = info->y1;
++	y2 = info->y2;
++	x1 = info->x1;
++	x2 = info->x2;
++	info->x1 = info->y1 = INT_MAX;
++	info->x2 = info->y2 = 0;
++	spin_unlock_irqrestore(&info->dirty_lock, flags);
++
++	list_for_each_entry(map, &info->mappings, link) {
++		if (!map->faults)
++			continue;
++		zap_page_range(map->vma, map->vma->vm_start,
++			       map->vma->vm_end - map->vma->vm_start, NULL);
++		map->faults = 0;
++	}
++
++	mutex_unlock(&info->mm_lock);
++
++	xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
++}
++
++static int xenfb_thread(void *data)
++{
++	struct xenfb_info *info = data;
++
++	while (!kthread_should_stop()) {
++		if (info->dirty) {
++			info->dirty = 0;
++			xenfb_update_screen(info);
++		}
++		wait_event_interruptible(info->wq,
++			kthread_should_stop() || info->dirty);
++		try_to_freeze();
++	}
++	return 0;
++}
++
++static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
++			   unsigned blue, unsigned transp,
++			   struct fb_info *info)
++{
++	u32 v;
++
++	if (regno >= info->cmap.len)
++		return 1;
++
++	red   >>= (16 - info->var.red.length);
++	green >>= (16 - info->var.green.length);
++	blue  >>= (16 - info->var.blue.length);
++
++	v = (red << info->var.red.offset) |
++	    (green << info->var.green.offset) |
++	    (blue << info->var.blue.offset);
++
++	/* FIXME is this sane?  check against xxxfb_setcolreg()!  */
++	switch (info->var.bits_per_pixel) {
++	case 16:
++	case 24:
++	case 32:
++		((u32 *)info->pseudo_palette)[regno] = v;
++		break;
++	}
++
++	return 0;
++}
++
++static void xenfb_timer(unsigned long data)
++{
++	struct xenfb_info *info = (struct xenfb_info *)data;
++	info->dirty = 1;
++	wake_up(&info->wq);
++}
++
++static void __xenfb_refresh(struct xenfb_info *info,
++			    int x1, int y1, int w, int h)
++{
++	int y2, x2;
++
++	y2 = y1 + h;
++	x2 = x1 + w;
++
++	if (info->y1 > y1)
++		info->y1 = y1;
++	if (info->y2 < y2)
++		info->y2 = y2;
++	if (info->x1 > x1)
++		info->x1 = x1;
++	if (info->x2 < x2)
++		info->x2 = x2;
++
++	if (timer_pending(&info->refresh))
++		return;
++
++	mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
++}
++
++static void xenfb_refresh(struct xenfb_info *info,
++			  int x1, int y1, int w, int h)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->dirty_lock, flags);
++	__xenfb_refresh(info, x1, y1, w, h);
++	spin_unlock_irqrestore(&info->dirty_lock, flags);
++}
++
++static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
++{
++	struct xenfb_info *info = p->par;
++
++	cfb_fillrect(p, rect);
++	xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
++}
++
++static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
++{
++	struct xenfb_info *info = p->par;
++
++	cfb_imageblit(p, image);
++	xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
++}
++
++static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
++{
++	struct xenfb_info *info = p->par;
++
++	cfb_copyarea(p, area);
++	xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
++}
++
++static void xenfb_vm_open(struct vm_area_struct *vma)
++{
++	struct xenfb_mapping *map = vma->vm_private_data;
++	atomic_inc(&map->map_refs);
++}
++
++static void xenfb_vm_close(struct vm_area_struct *vma)
++{
++	struct xenfb_mapping *map = vma->vm_private_data;
++	struct xenfb_info *info = map->info;
++
++	mutex_lock(&info->mm_lock);
++	if (atomic_dec_and_test(&map->map_refs)) {
++		list_del(&map->link);
++		kfree(map);
++	}
++	mutex_unlock(&info->mm_lock);
++}
++
++static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
++				    unsigned long vaddr, int *type)
++{
++	struct xenfb_mapping *map = vma->vm_private_data;
++	struct xenfb_info *info = map->info;
++	int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
++	unsigned long flags;
++	struct page *page;
++	int y1, y2;
++
++	if (pgnr >= info->nr_pages)
++		return NOPAGE_SIGBUS;
++
++	mutex_lock(&info->mm_lock);
++	spin_lock_irqsave(&info->dirty_lock, flags);
++	page = info->pages[pgnr];
++	get_page(page);
++	map->faults++;
++
++	y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
++	y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
++	if (y2 > info->fb_info->var.yres)
++		y2 = info->fb_info->var.yres;
++	__xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
++	spin_unlock_irqrestore(&info->dirty_lock, flags);
++	mutex_unlock(&info->mm_lock);
++
++	if (type)
++		*type = VM_FAULT_MINOR;
++
++	return page;
++}
++
++static struct vm_operations_struct xenfb_vm_ops = {
++	.open	= xenfb_vm_open,
++	.close	= xenfb_vm_close,
++	.nopage	= xenfb_vm_nopage,
++};
++
++static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
++{
++	struct xenfb_info *info = fb_info->par;
++	struct xenfb_mapping *map;
++	int map_pages;
++
++	if (!(vma->vm_flags & VM_WRITE))
++		return -EINVAL;
++	if (!(vma->vm_flags & VM_SHARED))
++		return -EINVAL;
++	if (vma->vm_pgoff != 0)
++		return -EINVAL;
++
++	map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
++	if (map_pages > info->nr_pages)
++		return -EINVAL;
++
++	map = kzalloc(sizeof(*map), GFP_KERNEL);
++	if (map == NULL)
++		return -ENOMEM;
++
++	map->vma = vma;
++	map->faults = 0;
++	map->info = info;
++	atomic_set(&map->map_refs, 1);
++
++	mutex_lock(&info->mm_lock);
++	list_add(&map->link, &info->mappings);
++	mutex_unlock(&info->mm_lock);
++
++	vma->vm_ops = &xenfb_vm_ops;
++	vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
++	vma->vm_private_data = map;
++
++	return 0;
++}
++
++static struct fb_ops xenfb_fb_ops = {
++	.owner		= THIS_MODULE,
++	.fb_setcolreg	= xenfb_setcolreg,
++	.fb_fillrect	= xenfb_fillrect,
++	.fb_copyarea	= xenfb_copyarea,
++	.fb_imageblit	= xenfb_imageblit,
++	.fb_mmap	= xenfb_mmap,
++};
++
++static irqreturn_t xenfb_event_handler(int rq,
++				       void *dev_id)
++{
++	/*
++	 * No in-events recognized; simply ignore them all.
++	 * If you need to recognize some, see xenkbd's input_handler()
++	 * for how to do that.
++	 */
++	struct xenfb_info *info = dev_id;
++	struct xenfb_page *page = info->page;
++
++	if (page->in_cons != page->in_prod) {
++		info->page->in_cons = info->page->in_prod;
++		notify_remote_via_evtchn(info->evtchn);
++	}
++	return IRQ_HANDLED;
++}
++
++static unsigned long vmalloc_to_mfn(void *address)
++{
++	return pfn_to_mfn(vmalloc_to_pfn(address));
++}
++
++static int __devinit xenfb_probe(struct xenbus_device *dev,
++				 const struct xenbus_device_id *id)
++{
++	struct xenfb_info *info;
++	struct fb_info *fb_info;
++	int ret;
++
++	info = kzalloc(sizeof(*info), GFP_KERNEL);
++	if (info == NULL) {
++		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++		return -ENOMEM;
++	}
++	dev->dev.driver_data = info;
++	info->xbdev = dev;
++	info->irq = -1;
++	info->x1 = info->y1 = INT_MAX;
++	spin_lock_init(&info->dirty_lock);
++	mutex_init(&info->mm_lock);
++	init_waitqueue_head(&info->wq);
++	init_timer(&info->refresh);
++	info->refresh.function = xenfb_timer;
++	info->refresh.data = (unsigned long)info;
++	INIT_LIST_HEAD(&info->mappings);
++
++	info->fb = vmalloc(xenfb_mem_len);
++	if (info->fb == NULL)
++		goto error_nomem;
++	memset(info->fb, 0, xenfb_mem_len);
++
++	info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++	info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
++			      GFP_KERNEL);
++	if (info->pages == NULL)
++		goto error_nomem;
++
++	info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
++	if (!info->mfns)
++		goto error_nomem;
++
++	/* set up shared page */
++	info->page = (void *)__get_free_page(GFP_KERNEL);
++	if (!info->page)
++		goto error_nomem;
++
++	xenfb_init_shared_page(info);
++
++	fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
++				/* see fishy hackery below */
++	if (fb_info == NULL)
++		goto error_nomem;
++
++	/* FIXME fishy hackery */
++	fb_info->pseudo_palette = fb_info->par;
++	fb_info->par = info;
++	/* /FIXME */
++	fb_info->screen_base = info->fb;
++
++	fb_info->fbops = &xenfb_fb_ops;
++	fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
++	fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
++	fb_info->var.bits_per_pixel = info->page->depth;
++
++	fb_info->var.red = (struct fb_bitfield){16, 8, 0};
++	fb_info->var.green = (struct fb_bitfield){8, 8, 0};
++	fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
++
++	fb_info->var.activate = FB_ACTIVATE_NOW;
++	fb_info->var.height = -1;
++	fb_info->var.width = -1;
++	fb_info->var.vmode = FB_VMODE_NONINTERLACED;
++
++	fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
++	fb_info->fix.line_length = info->page->line_length;
++	fb_info->fix.smem_start = 0;
++	fb_info->fix.smem_len = xenfb_mem_len;
++	strcpy(fb_info->fix.id, "xen");
++	fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
++	fb_info->fix.accel = FB_ACCEL_NONE;
++
++	fb_info->flags = FBINFO_FLAG_DEFAULT;
++
++	ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
++	if (ret < 0) {
++		framebuffer_release(fb_info);
++		xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
++		goto error;
++	}
++
++	ret = register_framebuffer(fb_info);
++	if (ret) {
++		fb_dealloc_cmap(&fb_info->cmap);
++		framebuffer_release(fb_info);
++		xenbus_dev_fatal(dev, ret, "register_framebuffer");
++		goto error;
++	}
++	info->fb_info = fb_info;
++
++	/* FIXME should this be delayed until backend XenbusStateConnected? */
++	info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
++	if (IS_ERR(info->kthread)) {
++		ret = PTR_ERR(info->kthread);
++		info->kthread = NULL;
++		xenbus_dev_fatal(dev, ret, "kthread_run");
++		goto error;
++	}
++
++	ret = xenfb_connect_backend(dev, info);
++	if (ret < 0)
++		goto error;
++
++	return 0;
++
++ error_nomem:
++	ret = -ENOMEM;
++	xenbus_dev_fatal(dev, ret, "allocating device memory");
++ error:
++	xenfb_remove(dev);
++	return ret;
++}
++
++static int xenfb_resume(struct xenbus_device *dev)
++{
++	struct xenfb_info *info = dev->dev.driver_data;
++
++	xenfb_disconnect_backend(info);
++	xenfb_init_shared_page(info);
++	return xenfb_connect_backend(dev, info);
++}
++
++static int xenfb_remove(struct xenbus_device *dev)
++{
++	struct xenfb_info *info = dev->dev.driver_data;
++
++	del_timer(&info->refresh);
++	if (info->kthread)
++		kthread_stop(info->kthread);
++	xenfb_disconnect_backend(info);
++	if (info->fb_info) {
++		unregister_framebuffer(info->fb_info);
++		fb_dealloc_cmap(&info->fb_info->cmap);
++		framebuffer_release(info->fb_info);
++	}
++	free_page((unsigned long)info->page);
++	vfree(info->mfns);
++	kfree(info->pages);
++	vfree(info->fb);
++	kfree(info);
++
++	return 0;
++}
++
++static void xenfb_init_shared_page(struct xenfb_info *info)
++{
++	int i;
++
++	for (i = 0; i < info->nr_pages; i++)
++		info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
++
++	for (i = 0; i < info->nr_pages; i++)
++		info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
++
++	info->page->pd[0] = vmalloc_to_mfn(info->mfns);
++	info->page->pd[1] = 0;
++	info->page->width = XENFB_WIDTH;
++	info->page->height = XENFB_HEIGHT;
++	info->page->depth = XENFB_DEPTH;
++	info->page->line_length = (info->page->depth / 8) * info->page->width;
++	info->page->mem_length = xenfb_mem_len;
++	info->page->in_cons = info->page->in_prod = 0;
++	info->page->out_cons = info->page->out_prod = 0;
++}
++
++static int xenfb_connect_backend(struct xenbus_device *dev,
++				 struct xenfb_info *info)
++{
++	int ret;
++	struct xenbus_transaction xbt;
++
++	ret = xenbus_alloc_evtchn(dev, &info->evtchn);
++	if (ret)
++		return ret;
++	ret = bind_evtchn_to_irqhandler(info->evtchn, xenfb_event_handler,
++					0, "xenfb", info);
++	if (ret < 0) {
++		xenbus_free_evtchn(dev, info->evtchn);
++		xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
++		return ret;
++	}
++	info->irq = ret;
++
++ again:
++	ret = xenbus_transaction_start(&xbt);
++	if (ret) {
++		xenbus_dev_fatal(dev, ret, "starting transaction");
++		return ret;
++	}
++	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
++			    virt_to_mfn(info->page));
++	if (ret)
++		goto error_xenbus;
++	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++			    info->evtchn);
++	if (ret)
++		goto error_xenbus;
++	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
++	if (ret)
++		goto error_xenbus;
++	ret = xenbus_transaction_end(xbt, 0);
++	if (ret) {
++		if (ret == -EAGAIN)
++			goto again;
++		xenbus_dev_fatal(dev, ret, "completing transaction");
++		return ret;
++	}
++
++	xenbus_switch_state(dev, XenbusStateInitialised);
++	return 0;
++
++ error_xenbus:
++	xenbus_transaction_end(xbt, 1);
++	xenbus_dev_fatal(dev, ret, "writing xenstore");
++	return ret;
++}
++
++static void xenfb_disconnect_backend(struct xenfb_info *info)
++{
++	if (info->irq >= 0)
++		unbind_from_irqhandler(info->irq, info);
++	info->irq = -1;
++}
++
++static void xenfb_backend_changed(struct xenbus_device *dev,
++				  enum xenbus_state backend_state)
++{
++	struct xenfb_info *info = dev->dev.driver_data;
++	int val;
++
++	switch (backend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateInitialised:
++	case XenbusStateUnknown:
++	case XenbusStateClosed:
++		break;
++
++	case XenbusStateInitWait:
++	InitWait:
++		xenbus_switch_state(dev, XenbusStateConnected);
++		break;
++
++	case XenbusStateConnected:
++		/*
++		 * Work around xenbus race condition: If backend goes
++		 * through InitWait to Connected fast enough, we can
++		 * get Connected twice here.
++		 */
++		if (dev->state != XenbusStateConnected)
++			goto InitWait; /* no InitWait seen yet, fudge it */
++
++		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++				 "request-update", "%d", &val) < 0)
++			val = 0;
++		if (val)
++			info->update_wanted = 1;
++		break;
++
++	case XenbusStateClosing:
++		/* FIXME is this safe in any dev->state? */
++		xenbus_frontend_closed(dev);
++		break;
++	}
++}
++
++static struct xenbus_device_id xenfb_ids[] = {
++	{ "vfb" },
++	{ "" }
++};
++
++static struct xenbus_driver xenfb = {
++	.name = "vfb",
++	.owner = THIS_MODULE,
++	.ids = xenfb_ids,
++	.probe = xenfb_probe,
++	.remove = xenfb_remove,
++	.resume = xenfb_resume,
++	.otherend_changed = xenfb_backend_changed,
++};
++
++static int __init xenfb_init(void)
++{
++	if (!is_running_on_xen())
++		return -ENODEV;
++
++	/* Nothing to do if running in dom0. */
++	if (is_initial_xendomain())
++		return -ENODEV;
++
++	return xenbus_register_frontend(&xenfb);
++}
++
++static void __exit xenfb_cleanup(void)
++{
++	return xenbus_unregister_driver(&xenfb);
++}
++
++module_init(xenfb_init);
++module_exit(xenfb_cleanup);
++
++MODULE_LICENSE("GPL");
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/fbfront/xenkbd.c b/drivers/xen/fbfront/xenkbd.c
+--- a/drivers/xen/fbfront/xenkbd.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/fbfront/xenkbd.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,300 @@
++/*
++ * linux/drivers/input/keyboard/xenkbd.c -- Xen para-virtual input device
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori at us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru at redhat.com>
++ *
++ *  Based on linux/drivers/input/mouse/sermouse.c
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License. See the file COPYING in the main directory of this archive for
++ *  more details.
++ */
++
++/*
++ * TODO:
++ *
++ * Switch to grant tables together with xenfb.c.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/input.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/fbif.h>
++#include <xen/interface/io/kbdif.h>
++#include <xen/xenbus.h>
++
++struct xenkbd_info
++{
++	struct input_dev *dev;
++	struct xenkbd_page *page;
++	unsigned evtchn;
++	int irq;
++	struct xenbus_device *xbdev;
++};
++
++static int xenkbd_remove(struct xenbus_device *);
++static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
++static void xenkbd_disconnect_backend(struct xenkbd_info *);
++
++/*
++ * Note: if you need to send out events, see xenfb_do_update() for how
++ * to do that.
++ */
++
++static irqreturn_t input_handler(int rq, void *dev_id)
++{
++	struct xenkbd_info *info = dev_id;
++	struct xenkbd_page *page = info->page;
++	__u32 cons, prod;
++
++	prod = page->in_prod;
++	if (prod == page->out_cons)
++		return IRQ_HANDLED;
++	rmb();			/* ensure we see ring contents up to prod */
++	for (cons = page->in_cons; cons != prod; cons++) {
++		union xenkbd_in_event *event;
++		event = &XENKBD_IN_RING_REF(page, cons);
++
++		switch (event->type) {
++		case XENKBD_TYPE_MOTION:
++			input_report_rel(info->dev, REL_X, event->motion.rel_x);
++			input_report_rel(info->dev, REL_Y, event->motion.rel_y);
++			break;
++		case XENKBD_TYPE_KEY:
++			input_report_key(info->dev, event->key.keycode, event->key.pressed);
++			break;
++		case XENKBD_TYPE_POS:
++			input_report_abs(info->dev, ABS_X, event->pos.abs_x);
++			input_report_abs(info->dev, ABS_Y, event->pos.abs_y);
++			break;
++		}
++	}
++	input_sync(info->dev);
++	mb();			/* ensure we got ring contents */
++	page->in_cons = cons;
++	notify_remote_via_evtchn(info->evtchn);
++
++	return IRQ_HANDLED;
++}
++
++int __devinit xenkbd_probe(struct xenbus_device *dev,
++			   const struct xenbus_device_id *id)
++{
++	int ret, i;
++	struct xenkbd_info *info;
++	struct input_dev *input_dev;
++
++	info = kzalloc(sizeof(*info), GFP_KERNEL);
++	if (!info) {
++		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++		return -ENOMEM;
++	}
++	dev->dev.driver_data = info;
++	info->xbdev = dev;
++
++	info->page = (void *)__get_free_page(GFP_KERNEL);
++	if (!info->page)
++		goto error_nomem;
++	info->page->in_cons = info->page->in_prod = 0;
++	info->page->out_cons = info->page->out_prod = 0;
++
++	input_dev = input_allocate_device();
++	if (!input_dev)
++		goto error_nomem;
++
++	input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
++	input_dev->keybit[LONG(BTN_MOUSE)]
++		= BIT(BTN_LEFT) | BIT(BTN_MIDDLE) | BIT(BTN_RIGHT);
++	/* TODO additional buttons */
++	input_dev->relbit[0] = BIT(REL_X) | BIT(REL_Y);
++
++	/* FIXME not sure this is quite right */
++	for (i = 0; i < 256; i++)
++		set_bit(i, input_dev->keybit);
++
++	input_dev->name = "Xen Virtual Keyboard/Mouse";
++
++	input_set_abs_params(input_dev, ABS_X, 0, XENFB_WIDTH, 0, 0);
++	input_set_abs_params(input_dev, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
++
++	ret = input_register_device(input_dev);
++	if (ret) {
++		input_free_device(input_dev);
++		xenbus_dev_fatal(dev, ret, "input_register_device");
++		goto error;
++	}
++	info->dev = input_dev;
++
++	ret = xenkbd_connect_backend(dev, info);
++	if (ret < 0)
++		goto error;
++
++	return 0;
++
++ error_nomem:
++	ret = -ENOMEM;
++	xenbus_dev_fatal(dev, ret, "allocating device memory");
++ error:
++	xenkbd_remove(dev);
++	return ret;
++}
++
++static int xenkbd_resume(struct xenbus_device *dev)
++{
++	struct xenkbd_info *info = dev->dev.driver_data;
++
++	xenkbd_disconnect_backend(info);
++	return xenkbd_connect_backend(dev, info);
++}
++
++static int xenkbd_remove(struct xenbus_device *dev)
++{
++	struct xenkbd_info *info = dev->dev.driver_data;
++
++	xenkbd_disconnect_backend(info);
++	input_unregister_device(info->dev);
++	free_page((unsigned long)info->page);
++	kfree(info);
++	return 0;
++}
++
++static int xenkbd_connect_backend(struct xenbus_device *dev,
++				  struct xenkbd_info *info)
++{
++	int ret;
++	struct xenbus_transaction xbt;
++
++	ret = xenbus_alloc_evtchn(dev, &info->evtchn);
++	if (ret)
++		return ret;
++	ret = bind_evtchn_to_irqhandler(info->evtchn, input_handler, 0,
++					"xenkbd", info);
++	if (ret < 0) {
++		xenbus_free_evtchn(dev, info->evtchn);
++		xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
++		return ret;
++	}
++	info->irq = ret;
++
++ again:
++	ret = xenbus_transaction_start(&xbt);
++	if (ret) {
++		xenbus_dev_fatal(dev, ret, "starting transaction");
++		return ret;
++	}
++	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
++			    virt_to_mfn(info->page));
++	if (ret)
++		goto error_xenbus;
++	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++			    info->evtchn);
++	if (ret)
++		goto error_xenbus;
++	ret = xenbus_transaction_end(xbt, 0);
++	if (ret) {
++		if (ret == -EAGAIN)
++			goto again;
++		xenbus_dev_fatal(dev, ret, "completing transaction");
++		return ret;
++	}
++
++	xenbus_switch_state(dev, XenbusStateInitialised);
++	return 0;
++
++ error_xenbus:
++	xenbus_transaction_end(xbt, 1);
++	xenbus_dev_fatal(dev, ret, "writing xenstore");
++	return ret;
++}
++
++static void xenkbd_disconnect_backend(struct xenkbd_info *info)
++{
++	if (info->irq >= 0)
++		unbind_from_irqhandler(info->irq, info);
++	info->irq = -1;
++}
 +
-+		port_user[i] = NULL;
-+		mask_evtchn(i);
++static void xenkbd_backend_changed(struct xenbus_device *dev,
++				   enum xenbus_state backend_state)
++{
++	struct xenkbd_info *info = dev->dev.driver_data;
++	int ret, val;
 +
-+		close.port = i;
-+		ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
-+		BUG_ON(ret);
-+	}
++	switch (backend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateInitialised:
++	case XenbusStateUnknown:
++	case XenbusStateClosed:
++		break;
 +
-+	spin_unlock_irq(&port_user_lock);
++	case XenbusStateInitWait:
++	InitWait:
++		ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++				   "feature-abs-pointer", "%d", &val);
++		if (ret < 0)
++			val = 0;
++		if (val) {
++			ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
++					    "request-abs-pointer", "1");
++			if (ret)
++				; /* FIXME */
++		}
++		xenbus_switch_state(dev, XenbusStateConnected);
++		break;
 +
-+	kfree(u);
++	case XenbusStateConnected:
++		/*
++		 * Work around xenbus race condition: If backend goes
++		 * through InitWait to Connected fast enough, we can
++		 * get Connected twice here.
++		 */
++		if (dev->state != XenbusStateConnected)
++			goto InitWait; /* no InitWait seen yet, fudge it */
++		break;
 +
-+	return 0;
++	case XenbusStateClosing:
++		xenbus_frontend_closed(dev);
++		break;
++	}
 +}
 +
-+static struct file_operations evtchn_fops = {
-+	.owner   = THIS_MODULE,
-+	.read    = evtchn_read,
-+	.write   = evtchn_write,
-+	.ioctl   = evtchn_ioctl,
-+	.poll    = evtchn_poll,
-+	.fasync  = evtchn_fasync,
-+	.open    = evtchn_open,
-+	.release = evtchn_release,
++static struct xenbus_device_id xenkbd_ids[] = {
++	{ "vkbd" },
++	{ "" }
 +};
 +
-+static struct miscdevice evtchn_miscdev = {
-+	.minor        = MISC_DYNAMIC_MINOR,
-+	.name         = "evtchn",
-+	.fops         = &evtchn_fops,
++static struct xenbus_driver xenkbd = {
++	.name = "vkbd",
++	.owner = THIS_MODULE,
++	.ids = xenkbd_ids,
++	.probe = xenkbd_probe,
++	.remove = xenkbd_remove,
++	.resume = xenkbd_resume,
++	.otherend_changed = xenkbd_backend_changed,
 +};
 +
-+static int __init evtchn_init(void)
++static int __init xenkbd_init(void)
 +{
-+	int err;
-+
 +	if (!is_running_on_xen())
 +		return -ENODEV;
 +
-+	spin_lock_init(&port_user_lock);
-+	memset(port_user, 0, sizeof(port_user));
-+
-+	/* Create '/dev/misc/evtchn'. */
-+	err = misc_register(&evtchn_miscdev);
-+	if (err != 0) {
-+		printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
-+		return err;
-+	}
-+
-+	printk("Event-channel device installed.\n");
++	/* Nothing to do if running in dom0. */
++	if (is_initial_xendomain())
++		return -ENODEV;
 +
-+	return 0;
++	return xenbus_register_frontend(&xenkbd);
 +}
 +
-+static void evtchn_cleanup(void)
++static void __exit xenkbd_cleanup(void)
 +{
-+	misc_deregister(&evtchn_miscdev);
++	return xenbus_unregister_driver(&xenkbd);
 +}
 +
-+module_init(evtchn_init);
-+module_exit(evtchn_cleanup);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/evtchn/Makefile linux-2.6.18-xen/drivers/xen/evtchn/Makefile
---- linux-2.6.18.3/drivers/xen/evtchn/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/evtchn/Makefile	2006-11-19 14:26:56.000000000 +0100
-@@ -0,0 +1,2 @@
++module_init(xenkbd_init);
++module_exit(xenkbd_cleanup);
 +
-+obj-y	:= evtchn.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/Kconfig linux-2.6.18-xen/drivers/xen/Kconfig
---- linux-2.6.18.3/drivers/xen/Kconfig	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/Kconfig	2006-11-19 14:26:55.000000000 +0100
-@@ -0,0 +1,260 @@
++MODULE_LICENSE("GPL");
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/Kconfig b/drivers/xen/Kconfig
+--- a/drivers/xen/Kconfig	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/Kconfig	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,283 @@
 +#
 +# This Kconfig describes Xen options
 +#
@@ -56512,6 +52352,29 @@
 +	  dedicated device-driver domain, or your master control domain
 +	  (domain 0), then you almost certainly want to say Y here.
 +
++config XEN_FRAMEBUFFER
++	tristate "Framebuffer-device frontend driver"
++	depends on XEN && FB
++	select FB_CFB_FILLRECT
++	select FB_CFB_COPYAREA
++	select FB_CFB_IMAGEBLIT
++	default y
++	help
++	  The framebuffer-device frontend driver allows the kernel to create a
++	  virtual framebuffer.  This framebuffer can be viewed in another
++	  domain.  Unless this domain has access to a real video card, you
++	  probably want to say Y here.
++
++config XEN_KEYBOARD
++	tristate "Keyboard-device frontend driver"
++	depends on XEN && XEN_FRAMEBUFFER && INPUT
++	default y
++	help
++	  The keyboard-device frontend driver allows the kernel to create a
++	  virtual keyboard.  This keyboard can then be driven by another
++	  domain.  If you've said Y to CONFIG_XEN_FRAMEBUFFER, you probably
++	  want to say Y here.
++
 +config XEN_SCRUB_PAGES
 +	bool "Scrub memory before freeing it to Xen"
 +	default y
@@ -56598,10 +52461,10 @@
 +	depends on SMP
 +
 +endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/Makefile linux-2.6.18-xen/drivers/xen/Makefile
---- linux-2.6.18.3/drivers/xen/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/Makefile	2006-11-19 14:26:55.000000000 +0100
-@@ -0,0 +1,17 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/Makefile b/drivers/xen/Makefile
+--- a/drivers/xen/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/Makefile	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,19 @@
 +obj-y	+= core/
 +obj-y	+= console/
 +obj-y	+= evtchn/
@@ -56619,9 +52482,11 @@
 +obj-$(CONFIG_XEN_NETDEV_FRONTEND)	+= netfront/
 +obj-$(CONFIG_XEN_PCIDEV_BACKEND)	+= pciback/
 +obj-$(CONFIG_XEN_PCIDEV_FRONTEND)	+= pcifront/
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/netback/common.h linux-2.6.18-xen/drivers/xen/netback/common.h
---- linux-2.6.18.3/drivers/xen/netback/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/netback/common.h	2006-12-05 18:42:37.000000000 +0100
++obj-$(CONFIG_XEN_FRAMEBUFFER)		+= fbfront/
++obj-$(CONFIG_XEN_KEYBOARD)		+= fbfront/
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
+--- a/drivers/xen/netback/common.h	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/netback/common.h	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,141 @@
 +/******************************************************************************
 + * arch/xen/drivers/netif/backend/common.h
@@ -56749,7 +52614,7 @@
 +
 +int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
 +struct net_device_stats *netif_be_get_stats(struct net_device *dev);
-+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++irqreturn_t netif_be_int(int irq, void *dev_id);
 +
 +static inline int netbk_can_queue(struct net_device *dev)
 +{
@@ -56764,9 +52629,9 @@
 +}
 +
 +#endif /* __NETIF__BACKEND__COMMON_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/netback/interface.c linux-2.6.18-xen/drivers/xen/netback/interface.c
---- linux-2.6.18.3/drivers/xen/netback/interface.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/netback/interface.c	2006-12-05 18:42:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
+--- a/drivers/xen/netback/interface.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/netback/interface.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,352 @@
 +/******************************************************************************
 + * arch/xen/drivers/netif/backend/interface.c
@@ -57120,10 +52985,10 @@
 +	}
 +	netif_free(netif);
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/netback/loopback.c linux-2.6.18-xen/drivers/xen/netback/loopback.c
---- linux-2.6.18.3/drivers/xen/netback/loopback.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/netback/loopback.c	2006-11-19 14:26:56.000000000 +0100
-@@ -0,0 +1,320 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/netback/loopback.c b/drivers/xen/netback/loopback.c
+--- a/drivers/xen/netback/loopback.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/netback/loopback.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,322 @@
 +/******************************************************************************
 + * netback/loopback.c
 + * 
@@ -57181,6 +53046,8 @@
 +#include <net/xfrm.h>		/* secpath_reset() */
 +#include <asm/hypervisor.h>	/* is_initial_xendomain() */
 +
++#include "../../../net/core/kmap_skb.h"
++
 +static int nloopbacks = -1;
 +module_param(nloopbacks, int, 0);
 +MODULE_PARM_DESC(nloopbacks, "Number of netback-loopback devices to create");
@@ -57273,7 +53140,7 @@
 +	np->stats.rx_bytes += skb->len;
 +	np->stats.rx_packets++;
 +
-+	if (skb->ip_summed == CHECKSUM_HW) {
++	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 +		/* Defer checksum calculation. */
 +		skb->proto_csum_blank = 1;
 +		/* Must be a local packet: assert its integrity. */
@@ -57444,18 +53311,18 @@
 +module_exit(loopback_exit);
 +
 +MODULE_LICENSE("Dual BSD/GPL");
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/netback/Makefile linux-2.6.18-xen/drivers/xen/netback/Makefile
---- linux-2.6.18.3/drivers/xen/netback/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/netback/Makefile	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/netback/Makefile b/drivers/xen/netback/Makefile
+--- a/drivers/xen/netback/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/netback/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,5 @@
 +obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
 +obj-$(CONFIG_XEN_NETDEV_LOOPBACK) += netloop.o
 +
 +netbk-y   := netback.o xenbus.o interface.o
 +netloop-y := loopback.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/netback/netback.c linux-2.6.18-xen/drivers/xen/netback/netback.c
---- linux-2.6.18.3/drivers/xen/netback/netback.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/netback/netback.c	2006-12-05 18:42:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
+--- a/drivers/xen/netback/netback.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/netback/netback.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,1510 @@
 +/******************************************************************************
 + * drivers/xen/netback/netback.c
@@ -57598,12 +53465,12 @@
 +
 +/*
 + * A gross way of confirming the origin of an skb data page. The slab
-+ * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
++ * allocator abuses a field in the page struct to cache the struct kmem_cache ptr.
 + */
 +static inline int is_xen_skb(struct sk_buff *skb)
 +{
-+	extern kmem_cache_t *skbuff_cachep;
-+	kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
++	extern struct kmem_cache *skbuff_cachep;
++	struct kmem_cache *cp = (struct kmem_cache *)virt_to_page(skb->head)->lru.next;
 +	return (cp == skbuff_cachep);
 +}
 +
@@ -57943,7 +53810,7 @@
 +			copy_op = npo->copy + npo->copy_cons++;
 +			if (copy_op->status != GNTST_okay) {
 +				DPRINTK("Bad status %d from copy to DOM%d.\n",
-+					gop->status, domid);
++					copy_op->status, domid);
 +				status = NETIF_RSP_ERROR;
 +			}
 +		} else {
@@ -58125,7 +53992,7 @@
 +		id = meta[npo.meta_cons].id;
 +		flags = nr_frags ? NETRXF_more_data : 0;
 +
-+		if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
 +			flags |= NETRXF_csum_blank | NETRXF_data_validated;
 +		else if (skb->proto_data_valid) /* remote but checksummed? */
 +			flags |= NETRXF_data_validated;
@@ -58811,7 +54678,7 @@
 +	netif_idx_release(page->index);
 +}
 +
-+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++irqreturn_t netif_be_int(int irq, void *dev_id)
 +{
 +	netif_t *netif = dev_id;
 +
@@ -58878,7 +54745,7 @@
 +}
 +
 +#ifdef NETBE_DEBUG_INTERRUPT
-+static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
++static irqreturn_t netif_be_dbg(int irq, void *dev_id)
 +{
 +	struct list_head *ent;
 +	netif_t *netif;
@@ -58967,9 +54834,9 @@
 +module_init(netback_init);
 +
 +MODULE_LICENSE("Dual BSD/GPL");
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/netback/xenbus.c linux-2.6.18-xen/drivers/xen/netback/xenbus.c
---- linux-2.6.18.3/drivers/xen/netback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/netback/xenbus.c	2006-12-05 18:42:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
+--- a/drivers/xen/netback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/netback/xenbus.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,438 @@
 +/*  Xenbus code for netif backend
 +    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
@@ -59409,9 +55276,9 @@
 +{
 +	xenbus_register_backend(&netback);
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/netfront/Kconfig linux-2.6.18-xen/drivers/xen/netfront/Kconfig
---- linux-2.6.18.3/drivers/xen/netfront/Kconfig	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/netfront/Kconfig	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/netfront/Kconfig b/drivers/xen/netfront/Kconfig
+--- a/drivers/xen/netfront/Kconfig	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/netfront/Kconfig	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,6 @@
 +
 +config XENNET
@@ -59419,17 +55286,17 @@
 +	depends on NETDEVICES && ARCH_XEN
 +	help
 +	  Network driver for Xen
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/netfront/Makefile linux-2.6.18-xen/drivers/xen/netfront/Makefile
---- linux-2.6.18.3/drivers/xen/netfront/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/netfront/Makefile	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/netfront/Makefile b/drivers/xen/netfront/Makefile
+--- a/drivers/xen/netfront/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/netfront/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,4 @@
 +
 +obj-$(CONFIG_XEN_NETDEV_FRONTEND)	:= xennet.o
 +
 +xennet-objs := netfront.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/netfront/netfront.c linux-2.6.18-xen/drivers/xen/netfront/netfront.c
---- linux-2.6.18.3/drivers/xen/netfront/netfront.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/netfront/netfront.c	2006-12-05 18:42:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/netfront/netfront.c b/drivers/xen/netfront/netfront.c
+--- a/drivers/xen/netfront/netfront.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/netfront/netfront.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,2138 @@
 +/******************************************************************************
 + * Virtual network driver for conversing with remote driver backends.
@@ -59550,7 +55417,7 @@
 +{
 +        return skb_is_gso(skb) &&
 +               (!skb_gso_ok(skb, dev->features) ||
-+                unlikely(skb->ip_summed != CHECKSUM_HW));
++                unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
 +}
 +#else
 +#define netif_needs_gso(dev, skb)	0
@@ -59678,7 +55545,7 @@
 +static void network_alloc_rx_buffers(struct net_device *);
 +static int send_fake_arp(struct net_device *);
 +
-+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++static irqreturn_t netif_int(int irq, void *dev_id);
 +
 +#ifdef CONFIG_SYSFS
 +static int xennet_sysfs_addif(struct net_device *netdev);
@@ -60358,7 +56225,7 @@
 +	tx->flags = 0;
 +	extra = NULL;
 +
-+	if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++	if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
 +		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
 +#ifdef CONFIG_XEN
 +	if (skb->proto_data_valid) /* remote but checksummed? */
@@ -60413,7 +56280,7 @@
 +	return 0;
 +}
 +
-+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++static irqreturn_t netif_int(int irq, void *dev_id)
 +{
 +	struct net_device *dev = dev_id;
 +	struct netfront_info *np = netdev_priv(dev);
@@ -61569,9 +57436,9 @@
 +module_exit(netif_exit);
 +
 +MODULE_LICENSE("Dual BSD/GPL");
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/conf_space.c linux-2.6.18-xen/drivers/xen/pciback/conf_space.c
---- linux-2.6.18.3/drivers/xen/pciback/conf_space.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/conf_space.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/conf_space.c b/drivers/xen/pciback/conf_space.c
+--- a/drivers/xen/pciback/conf_space.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/conf_space.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,425 @@
 +/*
 + * PCI Backend - Functions for creating a virtual configuration space for
@@ -61998,9 +57865,9 @@
 +{
 +	return pciback_config_capability_init();
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/conf_space_capability.c linux-2.6.18-xen/drivers/xen/pciback/conf_space_capability.c
---- linux-2.6.18.3/drivers/xen/pciback/conf_space_capability.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/conf_space_capability.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/conf_space_capability.c b/drivers/xen/pciback/conf_space_capability.c
+--- a/drivers/xen/pciback/conf_space_capability.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/conf_space_capability.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,71 @@
 +/*
 + * PCI Backend - Handles the virtual fields found on the capability lists
@@ -62073,9 +57940,9 @@
 +
 +	return 0;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/conf_space_capability.h linux-2.6.18-xen/drivers/xen/pciback/conf_space_capability.h
---- linux-2.6.18.3/drivers/xen/pciback/conf_space_capability.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/conf_space_capability.h	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/conf_space_capability.h b/drivers/xen/pciback/conf_space_capability.h
+--- a/drivers/xen/pciback/conf_space_capability.h	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/conf_space_capability.h	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,23 @@
 +/*
 + * PCI Backend - Data structures for special overlays for structures on
@@ -62100,9 +57967,9 @@
 +};
 +
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/conf_space_capability_pm.c linux-2.6.18-xen/drivers/xen/pciback/conf_space_capability_pm.c
---- linux-2.6.18.3/drivers/xen/pciback/conf_space_capability_pm.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/conf_space_capability_pm.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/conf_space_capability_pm.c b/drivers/xen/pciback/conf_space_capability_pm.c
+--- a/drivers/xen/pciback/conf_space_capability_pm.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/conf_space_capability_pm.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,113 @@
 +/*
 + * PCI Backend - Configuration space overlay for power management
@@ -62217,9 +58084,9 @@
 +	.capability = PCI_CAP_ID_PM,
 +	.fields = caplist_pm,
 +};
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/conf_space_capability_vpd.c linux-2.6.18-xen/drivers/xen/pciback/conf_space_capability_vpd.c
---- linux-2.6.18.3/drivers/xen/pciback/conf_space_capability_vpd.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/conf_space_capability_vpd.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/conf_space_capability_vpd.c b/drivers/xen/pciback/conf_space_capability_vpd.c
+--- a/drivers/xen/pciback/conf_space_capability_vpd.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/conf_space_capability_vpd.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,42 @@
 +/*
 + * PCI Backend - Configuration space overlay for Vital Product Data
@@ -62263,9 +58130,9 @@
 +	.capability = PCI_CAP_ID_VPD,
 +	.fields = caplist_vpd,
 +};
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/conf_space.h linux-2.6.18-xen/drivers/xen/pciback/conf_space.h
---- linux-2.6.18.3/drivers/xen/pciback/conf_space.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/conf_space.h	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/conf_space.h b/drivers/xen/pciback/conf_space.h
+--- a/drivers/xen/pciback/conf_space.h	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/conf_space.h	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,126 @@
 +/*
 + * PCI Backend - Common data structures for overriding the configuration space
@@ -62393,10 +58260,10 @@
 +int pciback_config_capability_add_fields(struct pci_dev *dev);
 +
 +#endif				/* __XEN_PCIBACK_CONF_SPACE_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/conf_space_header.c linux-2.6.18-xen/drivers/xen/pciback/conf_space_header.c
---- linux-2.6.18.3/drivers/xen/pciback/conf_space_header.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/conf_space_header.c	2006-11-19 14:26:56.000000000 +0100
-@@ -0,0 +1,299 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/conf_space_header.c b/drivers/xen/pciback/conf_space_header.c
+--- a/drivers/xen/pciback/conf_space_header.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/conf_space_header.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,301 @@
 +/*
 + * PCI Backend - Handles the virtual fields in the configuration space headers.
 + *
@@ -62419,12 +58286,13 @@
 +
 +static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
 +{
-+	if (!dev->is_enabled && is_enable_cmd(value)) {
++	if (!atomic_read(&dev->enable_cnt) && is_enable_cmd(value)) {
 +		if (unlikely(verbose_request))
 +			printk(KERN_DEBUG "pciback: %s: enable\n",
 +			       pci_name(dev));
-+		pci_enable_device(dev);
-+	} else if (dev->is_enabled && !is_enable_cmd(value)) {
++		if (pci_enable_device(dev))
++			return -ENODEV;
++	} else if (atomic_read(&dev->enable_cnt) && !is_enable_cmd(value)) {
 +		if (unlikely(verbose_request))
 +			printk(KERN_DEBUG "pciback: %s: disable\n",
 +			       pci_name(dev));
@@ -62443,7 +58311,8 @@
 +			printk(KERN_DEBUG
 +			       "pciback: %s: enable memory-write-invalidate\n",
 +			       pci_name(dev));
-+		pci_set_mwi(dev);
++		if (pci_set_mwi(dev))
++			return -EINVAL;
 +	}
 +
 +	return pci_write_config_word(dev, offset, value);
@@ -62696,9 +58565,9 @@
 +      out:
 +	return err;
 +}
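
Note on the command_write() hunk above: 2.6.19 replaced pci_dev.is_enabled
with the atomic reference count enable_cnt, and pci_enable_device() and
pci_set_mwi() grew __must_check, so their results can no longer be
silently ignored. A hedged sketch of the resulting idiom (the example_*
name is invented here):

    #include <linux/pci.h>

    /* Enable a device only if nobody has enabled it yet, 2.6.19+ style,
     * propagating the error instead of discarding it. */
    static int example_enable(struct pci_dev *dev)
    {
            if (!atomic_read(&dev->enable_cnt)) {
                    int err = pci_enable_device(dev);
                    if (err)
                            return err;
            }
            return 0;
    }
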
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/conf_space_quirks.c linux-2.6.18-xen/drivers/xen/pciback/conf_space_quirks.c
---- linux-2.6.18.3/drivers/xen/pciback/conf_space_quirks.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/conf_space_quirks.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/conf_space_quirks.c b/drivers/xen/pciback/conf_space_quirks.c
+--- a/drivers/xen/pciback/conf_space_quirks.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/conf_space_quirks.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,128 @@
 +/*
 + * PCI Backend - Handle special overlays for broken devices.
@@ -62828,9 +58697,9 @@
 +      out:
 +	return ret;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/conf_space_quirks.h linux-2.6.18-xen/drivers/xen/pciback/conf_space_quirks.h
---- linux-2.6.18.3/drivers/xen/pciback/conf_space_quirks.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/conf_space_quirks.h	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/conf_space_quirks.h b/drivers/xen/pciback/conf_space_quirks.h
+--- a/drivers/xen/pciback/conf_space_quirks.h	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/conf_space_quirks.h	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,35 @@
 +/*
 + * PCI Backend - Data structures for special overlays for broken devices.
@@ -62867,9 +58736,9 @@
 +int pciback_field_is_dup(struct pci_dev *dev, int reg);
 +
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/Makefile linux-2.6.18-xen/drivers/xen/pciback/Makefile
---- linux-2.6.18.3/drivers/xen/pciback/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/Makefile	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/Makefile b/drivers/xen/pciback/Makefile
+--- a/drivers/xen/pciback/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,15 @@
 +obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback.o
 +
@@ -62886,9 +58755,9 @@
 +ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
 +EXTRA_CFLAGS += -DDEBUG
 +endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/passthrough.c linux-2.6.18-xen/drivers/xen/pciback/passthrough.c
---- linux-2.6.18.3/drivers/xen/pciback/passthrough.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/passthrough.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/passthrough.c b/drivers/xen/pciback/passthrough.c
+--- a/drivers/xen/pciback/passthrough.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/passthrough.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,157 @@
 +/*
 + * PCI Backend - Provides restricted access to the real PCI bus topology
@@ -63047,9 +58916,9 @@
 +	kfree(dev_data);
 +	pdev->pci_dev_data = NULL;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/pciback.h linux-2.6.18-xen/drivers/xen/pciback/pciback.h
---- linux-2.6.18.3/drivers/xen/pciback/pciback.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/pciback.h	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/pciback.h b/drivers/xen/pciback/pciback.h
+--- a/drivers/xen/pciback/pciback.h	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/pciback.h	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,93 @@
 +/*
 + * PCI Backend Common Data Structures & Function Declarations
@@ -63092,7 +58961,7 @@
 +
 +	unsigned long flags;
 +
-+	struct work_struct op_work;
++	struct delayed_work op_work;
 +};
 +
 +struct pciback_dev_data {
@@ -63136,18 +59005,18 @@
 +void pciback_release_devices(struct pciback_device *pdev);
 +
 +/* Handles events from front-end */
-+irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs);
-+void pciback_do_op(void *data);
++irqreturn_t pciback_handle_event(int irq, void *dev_id);
++void pciback_do_op(struct work_struct *work);
 +
 +int pciback_xenbus_register(void);
 +void pciback_xenbus_unregister(void);
 +
 +extern int verbose_request;
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/pciback_ops.c linux-2.6.18-xen/drivers/xen/pciback/pciback_ops.c
---- linux-2.6.18.3/drivers/xen/pciback/pciback_ops.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/pciback_ops.c	2006-11-19 14:26:56.000000000 +0100
-@@ -0,0 +1,95 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/pciback_ops.c b/drivers/xen/pciback/pciback_ops.c
+--- a/drivers/xen/pciback/pciback_ops.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/pciback_ops.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,94 @@
 +/*
 + * PCI Backend Operations - respond to PCI requests from Frontend
 + *
@@ -63175,7 +59044,6 @@
 +
 +		pci_write_config_word(dev, PCI_COMMAND, 0);
 +
-+		dev->is_enabled = 0;
 +		dev->is_busmaster = 0;
 +	} else {
 +		pci_read_config_word(dev, PCI_COMMAND, &cmd);
@@ -63194,16 +59062,16 @@
 +	 * already processing a request */
 +	if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
 +	    && !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
-+		schedule_work(&pdev->op_work);
++		schedule_delayed_work(&pdev->op_work, 0);
 +}
 +
 +/* Performing the configuration space reads/writes must not be done in atomic
 + * context because some of the pci_* functions can sleep (mostly due to ACPI
 + * use of semaphores). This function is intended to be called from a work
 + * queue in process context taking a struct pciback_device as a parameter */
-+void pciback_do_op(void *data)
++void pciback_do_op(struct work_struct *work)
 +{
-+	struct pciback_device *pdev = data;
++	struct pciback_device *pdev = container_of(work, struct pciback_device, op_work.work);
 +	struct pci_dev *dev;
 +	struct xen_pci_op *op = &pdev->sh_info->op;
 +
@@ -63235,7 +59103,7 @@
 +	test_and_schedule_op(pdev);
 +}
 +
-+irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs)
++irqreturn_t pciback_handle_event(int irq, void *dev_id)
 +{
 +	struct pciback_device *pdev = dev_id;
 +
@@ -63243,10 +59111,10 @@
 +
 +	return IRQ_HANDLED;
 +}
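
Note on the pciback_do_op() rework above: since 2.6.20 a workqueue
callback receives the struct work_struct itself rather than a void *
cookie, so the owning structure is recovered with container_of(); for a
struct delayed_work the embedded item is its .work member, which is why
the patch writes "op_work.work". Sketch only, with invented example_*
names:

    #include <linux/workqueue.h>

    struct example_dev {
            struct delayed_work op_work;
            int pending;
    };

    /* 2.6.20+ callback: the work item itself is the argument. */
    static void example_do_op(struct work_struct *work)
    {
            struct example_dev *d =
                    container_of(work, struct example_dev, op_work.work);

            d->pending = 0;
    }
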
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/pci_stub.c linux-2.6.18-xen/drivers/xen/pciback/pci_stub.c
---- linux-2.6.18.3/drivers/xen/pciback/pci_stub.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/pci_stub.c	2006-11-19 14:26:56.000000000 +0100
-@@ -0,0 +1,916 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
+--- a/drivers/xen/pciback/pci_stub.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/pci_stub.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,923 @@
 +/*
 + * PCI Stub Driver - Grabs devices in backend to be exported later
 + *
@@ -64097,13 +59965,20 @@
 +	err = pci_register_driver(&pciback_pci_driver);
 +	if (err < 0)
 +		goto out;
-+
-+	driver_create_file(&pciback_pci_driver.driver, &driver_attr_new_slot);
-+	driver_create_file(&pciback_pci_driver.driver,
-+			   &driver_attr_remove_slot);
-+	driver_create_file(&pciback_pci_driver.driver, &driver_attr_slots);
-+	driver_create_file(&pciback_pci_driver.driver, &driver_attr_quirks);
-+	driver_create_file(&pciback_pci_driver.driver, &driver_attr_permissive);
++	err = driver_create_file(&pciback_pci_driver.driver, &driver_attr_new_slot);
++	if (err < 0)
++		goto out;
++	err = driver_create_file(&pciback_pci_driver.driver,
++				 &driver_attr_remove_slot);
++	if (err < 0)
++		goto out;
++	err = driver_create_file(&pciback_pci_driver.driver, &driver_attr_slots);
++	if (err < 0)
++		goto out;
++	err = driver_create_file(&pciback_pci_driver.driver, &driver_attr_quirks);
++	if (err < 0)
++		goto out;
++	err = driver_create_file(&pciback_pci_driver.driver, &driver_attr_permissive);
 +
 +      out:
 +	return err;
@@ -64163,9 +60038,9 @@
 +module_exit(pciback_cleanup);
 +
 +MODULE_LICENSE("Dual BSD/GPL");
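
Note on the pcistub_init() hunk above: driver_create_file() is
__must_check from 2.6.19 on, which the added checks satisfy; strictly,
though, a failure still leaves the earlier attributes in place and the
driver registered. A fuller unwind would look roughly like this (sketch
with invented example_* names, not what the patch does):

    #include <linux/init.h>
    #include <linux/device.h>
    #include <linux/pci.h>

    static struct pci_driver example_driver;
    static struct driver_attribute example_attr_a, example_attr_b;

    static int __init example_init(void)
    {
            int err = pci_register_driver(&example_driver);
            if (err)
                    return err;

            err = driver_create_file(&example_driver.driver, &example_attr_a);
            if (err)
                    goto err_unregister;
            err = driver_create_file(&example_driver.driver, &example_attr_b);
            if (err)
                    goto err_remove_a;
            return 0;

    err_remove_a:
            driver_remove_file(&example_driver.driver, &example_attr_a);
    err_unregister:
            pci_unregister_driver(&example_driver);
            return err;
    }
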
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/slot.c linux-2.6.18-xen/drivers/xen/pciback/slot.c
---- linux-2.6.18.3/drivers/xen/pciback/slot.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/slot.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/slot.c b/drivers/xen/pciback/slot.c
+--- a/drivers/xen/pciback/slot.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/slot.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,151 @@
 +/*
 + * PCI Backend - Provides a Virtual PCI bus (with real devices)
@@ -64318,9 +60193,9 @@
 +	kfree(slot_dev);
 +	pdev->pci_dev_data = NULL;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/vpci.c linux-2.6.18-xen/drivers/xen/pciback/vpci.c
---- linux-2.6.18.3/drivers/xen/pciback/vpci.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/vpci.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/vpci.c b/drivers/xen/pciback/vpci.c
+--- a/drivers/xen/pciback/vpci.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/vpci.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,204 @@
 +/*
 + * PCI Backend - Provides a Virtual PCI bus (with real devices)
@@ -64526,9 +60401,9 @@
 +	kfree(vpci_dev);
 +	pdev->pci_dev_data = NULL;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pciback/xenbus.c linux-2.6.18-xen/drivers/xen/pciback/xenbus.c
---- linux-2.6.18.3/drivers/xen/pciback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pciback/xenbus.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
+--- a/drivers/xen/pciback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pciback/xenbus.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,458 @@
 +/*
 + * PCI Backend Xenbus Setup - handles setup with frontend and xend
@@ -64564,7 +60439,7 @@
 +	pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
 +	pdev->be_watching = 0;
 +
-+	INIT_WORK(&pdev->op_work, pciback_do_op, pdev);
++	INIT_DELAYED_WORK(&pdev->op_work, pciback_do_op);
 +
 +	if (pciback_init_devices(pdev)) {
 +		kfree(pdev);
@@ -64988,9 +60863,9 @@
 +{
 +	xenbus_unregister_driver(&xenbus_pciback_driver);
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pcifront/Makefile linux-2.6.18-xen/drivers/xen/pcifront/Makefile
---- linux-2.6.18.3/drivers/xen/pcifront/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pcifront/Makefile	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pcifront/Makefile b/drivers/xen/pcifront/Makefile
+--- a/drivers/xen/pcifront/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pcifront/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,7 @@
 +obj-y += pcifront.o
 +
@@ -64999,9 +60874,9 @@
 +ifeq ($(CONFIG_XEN_PCIDEV_FE_DEBUG),y)
 +EXTRA_CFLAGS += -DDEBUG
 +endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pcifront/pci.c linux-2.6.18-xen/drivers/xen/pcifront/pci.c
---- linux-2.6.18.3/drivers/xen/pcifront/pci.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pcifront/pci.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pcifront/pci.c b/drivers/xen/pcifront/pci.c
+--- a/drivers/xen/pcifront/pci.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pcifront/pci.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,46 @@
 +/*
 + * PCI Frontend Operations - ensure only one PCI frontend runs at a time
@@ -65049,9 +60924,9 @@
 +
 +	spin_unlock(&pcifront_dev_lock);
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pcifront/pcifront.h linux-2.6.18-xen/drivers/xen/pcifront/pcifront.h
---- linux-2.6.18.3/drivers/xen/pcifront/pcifront.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pcifront/pcifront.h	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pcifront/pcifront.h b/drivers/xen/pcifront/pcifront.h
+--- a/drivers/xen/pcifront/pcifront.h	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pcifront/pcifront.h	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,40 @@
 +/*
 + * PCI Frontend - Common data structures & function declarations
@@ -65093,9 +60968,9 @@
 +void pcifront_free_roots(struct pcifront_device *pdev);
 +
 +#endif	/* __XEN_PCIFRONT_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pcifront/pci_op.c linux-2.6.18-xen/drivers/xen/pcifront/pci_op.c
---- linux-2.6.18.3/drivers/xen/pcifront/pci_op.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pcifront/pci_op.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pcifront/pci_op.c b/drivers/xen/pcifront/pci_op.c
+--- a/drivers/xen/pcifront/pci_op.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pcifront/pci_op.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,273 @@
 +/*
 + * PCI Frontend Operations - Communicates with frontend
@@ -65370,9 +61245,9 @@
 +		kfree(bus_entry);
 +	}
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/pcifront/xenbus.c linux-2.6.18-xen/drivers/xen/pcifront/xenbus.c
---- linux-2.6.18.3/drivers/xen/pcifront/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/pcifront/xenbus.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/pcifront/xenbus.c b/drivers/xen/pcifront/xenbus.c
+--- a/drivers/xen/pcifront/xenbus.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/pcifront/xenbus.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,295 @@
 +/*
 + * PCI Frontend Xenbus Setup - handles setup with backend (imports page/evtchn)
@@ -65669,15 +61544,15 @@
 +
 +/* Initialize after the Xen PCI Frontend Stub is initialized */
 +subsys_initcall(pcifront_init);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/privcmd/Makefile linux-2.6.18-xen/drivers/xen/privcmd/Makefile
---- linux-2.6.18.3/drivers/xen/privcmd/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/privcmd/Makefile	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/privcmd/Makefile b/drivers/xen/privcmd/Makefile
+--- a/drivers/xen/privcmd/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/privcmd/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,2 @@
 +
 +obj-$(CONFIG_XEN_PRIVCMD)	:= privcmd.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/privcmd/privcmd.c linux-2.6.18-xen/drivers/xen/privcmd/privcmd.c
---- linux-2.6.18.3/drivers/xen/privcmd/privcmd.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/privcmd/privcmd.c	2006-12-05 18:42:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/privcmd/privcmd.c b/drivers/xen/privcmd/privcmd.c
+--- a/drivers/xen/privcmd/privcmd.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/privcmd/privcmd.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,294 @@
 +/******************************************************************************
 + * privcmd.c
@@ -65973,9 +61848,9 @@
 +}
 +
 +__initcall(privcmd_init);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/tpmback/common.h linux-2.6.18-xen/drivers/xen/tpmback/common.h
---- linux-2.6.18.3/drivers/xen/tpmback/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/tpmback/common.h	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/tpmback/common.h b/drivers/xen/tpmback/common.h
+--- a/drivers/xen/tpmback/common.h	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/tpmback/common.h	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,86 @@
 +/******************************************************************************
 + * drivers/xen/tpmback/common.h
@@ -66063,9 +61938,9 @@
 +}
 +
 +#endif /* __TPMIF__BACKEND__COMMON_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/tpmback/interface.c linux-2.6.18-xen/drivers/xen/tpmback/interface.c
---- linux-2.6.18.3/drivers/xen/tpmback/interface.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/tpmback/interface.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/tpmback/interface.c b/drivers/xen/tpmback/interface.c
+--- a/drivers/xen/tpmback/interface.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/tpmback/interface.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,182 @@
 + /*****************************************************************************
 + * drivers/xen/tpmback/interface.c
@@ -66084,7 +61959,7 @@
 +#include <xen/balloon.h>
 +#include <xen/gnttab.h>
 +
-+static kmem_cache_t *tpmif_cachep;
++static struct kmem_cache *tpmif_cachep;
 +int num_frontends = 0;
 +
 +LIST_HEAD(tpmif_list);
@@ -66249,17 +62124,17 @@
 +{
 +	kmem_cache_destroy(tpmif_cachep);
 +}
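
Note on the tpmif_cachep change above: 2.6.19 removed the kmem_cache_t
typedef, so caches are now declared as struct kmem_cache *. Usage is
otherwise unchanged; for reference, assuming the six-argument
kmem_cache_create() of this era (with ctor and dtor pointers):

    #include <linux/slab.h>
    #include <linux/errno.h>

    struct example { int n; };
    static struct kmem_cache *example_cachep;

    static int example_cache_init(void)
    {
            example_cachep = kmem_cache_create("example_cache",
                                               sizeof(struct example),
                                               0, 0, NULL, NULL);
            return example_cachep ? 0 : -ENOMEM;
    }
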
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/tpmback/Makefile linux-2.6.18-xen/drivers/xen/tpmback/Makefile
---- linux-2.6.18.3/drivers/xen/tpmback/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/tpmback/Makefile	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/tpmback/Makefile b/drivers/xen/tpmback/Makefile
+--- a/drivers/xen/tpmback/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/tpmback/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,4 @@
 +
 +obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmbk.o
 +
 +tpmbk-y += tpmback.o interface.o xenbus.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/tpmback/tpmback.c linux-2.6.18-xen/drivers/xen/tpmback/tpmback.c
---- linux-2.6.18.3/drivers/xen/tpmback/tpmback.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/tpmback/tpmback.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/tpmback/tpmback.c b/drivers/xen/tpmback/tpmback.c
+--- a/drivers/xen/tpmback/tpmback.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/tpmback/tpmback.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,944 @@
 +/******************************************************************************
 + * drivers/xen/tpmback/tpmback.c
@@ -67205,9 +63080,9 @@
 +}
 +
 +MODULE_LICENSE("Dual BSD/GPL");
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/tpmback/xenbus.c linux-2.6.18-xen/drivers/xen/tpmback/xenbus.c
---- linux-2.6.18.3/drivers/xen/tpmback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/tpmback/xenbus.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/tpmback/xenbus.c b/drivers/xen/tpmback/xenbus.c
+--- a/drivers/xen/tpmback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/tpmback/xenbus.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,287 @@
 +/*  Xenbus code for tpmif backend
 +    Copyright (C) 2005 IBM Corporation
@@ -67496,9 +63371,9 @@
 +{
 +	xenbus_unregister_driver(&tpmback);
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/util.c linux-2.6.18-xen/drivers/xen/util.c
---- linux-2.6.18.3/drivers/xen/util.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/util.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/util.c b/drivers/xen/util.c
+--- a/drivers/xen/util.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/util.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,70 @@
 +
 +#include <linux/mm.h>
@@ -67570,9 +63445,9 @@
 +	preempt_enable();
 +}
 +EXPORT_SYMBOL_GPL(unlock_vm_area);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/xenbus/Makefile linux-2.6.18-xen/drivers/xen/xenbus/Makefile
---- linux-2.6.18.3/drivers/xen/xenbus/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/xenbus/Makefile	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
+--- a/drivers/xen/xenbus/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/xenbus/Makefile	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,12 @@
 +obj-y	+= xenbus.o
 +obj-$(CONFIG_XEN_BACKEND) += xenbus_be.o
@@ -67586,9 +63461,9 @@
 +xenbus-objs += xenbus_xs.o
 +xenbus-objs += xenbus_probe.o
 +obj-$(CONFIG_XEN_XENBUS_DEV) += xenbus_dev.o
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/xenbus/xenbus_backend_client.c linux-2.6.18-xen/drivers/xen/xenbus/xenbus_backend_client.c
---- linux-2.6.18.3/drivers/xen/xenbus/xenbus_backend_client.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/xenbus/xenbus_backend_client.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/xenbus/xenbus_backend_client.c b/drivers/xen/xenbus/xenbus_backend_client.c
+--- a/drivers/xen/xenbus/xenbus_backend_client.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/xenbus/xenbus_backend_client.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,147 @@
 +/******************************************************************************
 + * Backend-client-facing interface for the Xenbus driver.  In other words, the
@@ -67737,10 +63612,10 @@
 +EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
 +
 +MODULE_LICENSE("Dual BSD/GPL");
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/xenbus/xenbus_client.c linux-2.6.18-xen/drivers/xen/xenbus/xenbus_client.c
---- linux-2.6.18.3/drivers/xen/xenbus/xenbus_client.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/xenbus/xenbus_client.c	2006-11-19 14:26:56.000000000 +0100
-@@ -0,0 +1,299 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
+--- a/drivers/xen/xenbus/xenbus_client.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/xenbus/xenbus_client.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,300 @@
 +/******************************************************************************
 + * Client-facing interface for the Xenbus driver.  In other words, the
 + * interface between the Xenbus and the device-specific code, be it the
@@ -67794,6 +63669,7 @@
 +	};
 +	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
 +}
++EXPORT_SYMBOL_GPL(xenbus_strstate);
 +
 +int xenbus_watch_path(struct xenbus_device *dev, const char *path,
 +		      struct xenbus_watch *watch,
@@ -68040,9 +63916,9 @@
 +	return result;
 +}
 +EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/xenbus/xenbus_comms.c linux-2.6.18-xen/drivers/xen/xenbus/xenbus_comms.c
---- linux-2.6.18.3/drivers/xen/xenbus/xenbus_comms.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/xenbus/xenbus_comms.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
+--- a/drivers/xen/xenbus/xenbus_comms.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/xenbus/xenbus_comms.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,203 @@
 +/******************************************************************************
 + * xenbus_comms.c
@@ -68087,13 +63963,13 @@
 +
 +static int xenbus_irq;
 +
-+extern void xenbus_probe(void *);
++extern void xenbus_probe(struct work_struct *);
 +extern int xenstored_ready;
-+static DECLARE_WORK(probe_work, xenbus_probe, NULL);
++static DECLARE_WORK(probe_work, xenbus_probe);
 +
 +DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
 +
-+static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
++static irqreturn_t wake_waiting(int irq, void *unused)
 +{
 +	if (unlikely(xenstored_ready == 0)) {
 +		xenstored_ready = 1;
@@ -68247,9 +64123,9 @@
 +
 +	return 0;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/xenbus/xenbus_comms.h linux-2.6.18-xen/drivers/xen/xenbus/xenbus_comms.h
---- linux-2.6.18.3/drivers/xen/xenbus/xenbus_comms.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/xenbus/xenbus_comms.h	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
+--- a/drivers/xen/xenbus/xenbus_comms.h	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/xenbus/xenbus_comms.h	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,45 @@
 +/*
 + * Private include for xenbus communications.
@@ -68296,9 +64172,9 @@
 +extern int xen_store_evtchn;
 +
 +#endif /* _XENBUS_COMMS_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/xenbus/xenbus_dev.c linux-2.6.18-xen/drivers/xen/xenbus/xenbus_dev.c
---- linux-2.6.18.3/drivers/xen/xenbus/xenbus_dev.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/xenbus/xenbus_dev.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/xenbus/xenbus_dev.c b/drivers/xen/xenbus/xenbus_dev.c
+--- a/drivers/xen/xenbus/xenbus_dev.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/xenbus/xenbus_dev.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,356 @@
 +/*
 + * xenbus_dev.c
@@ -68656,10 +64532,10 @@
 +
 +	return 0;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/xenbus/xenbus_probe.c linux-2.6.18-xen/drivers/xen/xenbus/xenbus_probe.c
---- linux-2.6.18.3/drivers/xen/xenbus/xenbus_probe.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/xenbus/xenbus_probe.c	2006-11-19 14:26:56.000000000 +0100
-@@ -0,0 +1,1189 @@
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+--- a/drivers/xen/xenbus/xenbus_probe.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/xenbus/xenbus_probe.c	2007-03-14 10:55:36.000000000 +0100
+@@ -0,0 +1,1208 @@
 +/******************************************************************************
 + * Talks to Xen Store to figure out what devices we have.
 + *
@@ -69282,8 +65158,13 @@
 +	if (err)
 +		goto fail;
 +
-+	device_create_file(&xendev->dev, &dev_attr_nodename);
-+	device_create_file(&xendev->dev, &dev_attr_devtype);
++	err = device_create_file(&xendev->dev, &dev_attr_nodename);
++	if (err)
++		goto fail;
++
++	err = device_create_file(&xendev->dev, &dev_attr_devtype);
++	if (err)
++		goto fail;
 +
 +	return 0;
 +fail:
@@ -69592,7 +65473,7 @@
 +EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
 +
 +
-+void xenbus_probe(void *unused)
++void xenbus_probe(struct work_struct *unused)
 +{
 +	BUG_ON((xenstored_ready <= 0));
 +
@@ -69660,8 +65541,12 @@
 +		return -ENODEV;
 +
 +	/* Register ourselves with the kernel bus subsystem */
-+	bus_register(&xenbus_frontend.bus);
-+	bus_register(&xenbus_backend.bus);
++	err = bus_register(&xenbus_frontend.bus);
++	if (err)
++		goto err_frontend_bus;
++	err = bus_register(&xenbus_backend.bus);
++	if (err)
++		goto err_backend_bus;
 +
 +	/*
 +	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
@@ -69671,8 +65556,10 @@
 +
 +		/* Allocate page. */
 +		page = get_zeroed_page(GFP_KERNEL);
-+		if (!page)
-+			return -ENOMEM;
++		if (!page) {
++			err = -ENOMEM;
++			goto err_nomem;
++		}
 +
 +		xen_store_mfn = xen_start_info->store_mfn =
 +			pfn_to_mfn(virt_to_phys((void *)page) >>
@@ -69731,8 +65618,12 @@
 +	}
 +
 +	/* Register ourselves with the kernel device subsystem */
-+	device_register(&xenbus_frontend.dev);
-+	device_register(&xenbus_backend.dev);
++	err = device_register(&xenbus_frontend.dev);
++	if (err)
++		goto err;
++	err = device_register(&xenbus_backend.dev);
++	if (err)
++		goto err;
 +
 +	if (!is_initial_xendomain())
 +		xenbus_probe(NULL);
@@ -69748,7 +65639,11 @@
 +	 * must exist because front/backend drivers will use them when they are
 +	 * registered.
 +	 */
-+
++ err_nomem:
++	bus_unregister(&xenbus_backend.bus);
++ err_backend_bus:
++	bus_unregister(&xenbus_frontend.bus);
++ err_frontend_bus:
 +	return err;
 +}
 +
@@ -69849,9 +65744,9 @@
 +
 +late_initcall(boot_wait_for_devices);
 +#endif
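
Note on the error paths added to the xenbus init code above: the unwind
labels must undo steps in the reverse order they succeeded, so the
err_nomem branch unregisters the backend bus first and then falls
through to unregister the frontend. The general pattern, with
placeholder step functions that are not part of the patch:

    /* Canonical inverse-order cleanup; step_*()/undo_*() are stand-ins. */
    static int step_a(void) { return 0; }   /* e.g. bus_register(frontend) */
    static int step_b(void) { return 0; }   /* e.g. bus_register(backend) */
    static void undo_a(void) { }

    static int example_init(void)
    {
            int err;

            err = step_a();
            if (err)
                    goto out;
            err = step_b();
            if (err)
                    goto err_undo_a;
            return 0;

    err_undo_a:
            undo_a();
    out:
            return err;
    }
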
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/drivers/xen/xenbus/xenbus_xs.c linux-2.6.18-xen/drivers/xen/xenbus/xenbus_xs.c
---- linux-2.6.18.3/drivers/xen/xenbus/xenbus_xs.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/drivers/xen/xenbus/xenbus_xs.c	2006-11-19 14:26:56.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
+--- a/drivers/xen/xenbus/xenbus_xs.c	1970-01-01 01:00:00.000000000 +0100
++++ b/drivers/xen/xenbus/xenbus_xs.c	2007-03-14 10:55:36.000000000 +0100
 @@ -0,0 +1,853 @@
 +/******************************************************************************
 + * xenbus_xs.c
@@ -70706,36 +66601,26 @@
 +
 +	return 0;
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/fs/Kconfig linux-2.6.18-xen/fs/Kconfig
---- linux-2.6.18.3/fs/Kconfig	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/fs/Kconfig	2006-11-19 14:26:56.000000000 +0100
-@@ -865,6 +865,7 @@
+diff -x '.hg*' -x '.git*' -urN a/fs/Kconfig b/fs/Kconfig
+--- a/fs/Kconfig	2007-03-16 18:49:35.000000000 +0100
++++ b/fs/Kconfig	2007-03-14 10:55:36.000000000 +0100
+@@ -1003,6 +1003,7 @@
  config HUGETLBFS
  	bool "HugeTLB file system support"
- 	depends X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
-+	depends !XEN
+ 	depends on X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
++	depends on !XEN
  	help
  	  hugetlbfs is a filesystem backing for HugeTLB pages, based on
  	  ramfs. For architectures that support it, say Y here and read
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-generic/vmlinux.lds.h linux-2.6.18-xen/include/asm-generic/vmlinux.lds.h
---- linux-2.6.18.3/include/asm-generic/vmlinux.lds.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-generic/vmlinux.lds.h	2006-11-19 14:27:04.000000000 +0100
-@@ -194,3 +194,6 @@
- 		.stab.index 0 : { *(.stab.index) }			\
- 		.stab.indexstr 0 : { *(.stab.indexstr) }		\
- 		.comment 0 : { *(.comment) }
-+
-+#define NOTES								\
-+		.notes : { *(.note.*) } :note
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/apic.h linux-2.6.18-xen/include/asm-i386/apic.h
---- linux-2.6.18.3/include/asm-i386/apic.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-i386/apic.h	2006-11-19 14:27:04.000000000 +0100
-@@ -119,10 +119,12 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/apic.h b/include/asm-i386/apic.h
+--- a/include/asm-i386/apic.h	2007-03-16 18:49:38.000000000 +0100
++++ b/include/asm-i386/apic.h	2007-03-14 10:55:42.000000000 +0100
+@@ -116,10 +116,12 @@
  
- extern int disable_timer_pin_1;
+ extern void enable_NMI_through_LVT0 (void * dummy);
  
 +#ifndef CONFIG_XEN
- void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
+ void smp_send_timer_broadcast_ipi(void);
  void switch_APIC_timer_to_ipi(void *cpumask);
  void switch_ipi_to_APIC_timer(void *cpumask);
  #define ARCH_APICTIMER_STOPS_ON_C3	1
@@ -70743,20 +66628,38 @@
  
  extern int timer_over_8254;
  
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/fixmap.h linux-2.6.18-xen/include/asm-i386/fixmap.h
---- linux-2.6.18.3/include/asm-i386/fixmap.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-i386/fixmap.h	2006-12-05 18:42:37.000000000 +0100
-@@ -19,6 +19,7 @@
-  * Leave one empty page between vmalloc'ed areas and
-  * the start of the fixmap.
-  */
-+
- #define __FIXADDR_TOP	0xfffff000
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h
+--- a/include/asm-i386/highmem.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-i386/highmem.h	2007-03-14 10:55:42.000000000 +0100
+@@ -68,6 +68,9 @@
+ void *kmap(struct page *page);
+ void kunmap(struct page *page);
+ void *kmap_atomic(struct page *page, enum km_type type);
++#ifdef CONFIG_XEN
++void *kmap_atomic_pte(struct page *page, enum km_type type);
++#endif
+ void kunmap_atomic(void *kvaddr, enum km_type type);
+ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+ struct page *kmap_atomic_to_page(void *ptr);
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/kmap_types.h b/include/asm-i386/kmap_types.h
+--- a/include/asm-i386/kmap_types.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-i386/kmap_types.h	2007-03-14 10:55:42.000000000 +0100
+@@ -22,7 +22,12 @@
+ D(10)	KM_IRQ1,
+ D(11)	KM_SOFTIRQ0,
+ D(12)	KM_SOFTIRQ1,
++#ifdef CONFIG_XEN
++D(13)	KM_SWIOTLB,
++D(14)	KM_TYPE_NR
++#else
+ D(13)	KM_TYPE_NR
++#endif
+ };
  
- #ifndef __ASSEMBLY__
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-default/mach_traps.h linux-2.6.18-xen/include/asm-i386/mach-default/mach_traps.h
---- linux-2.6.18.3/include/asm-i386/mach-default/mach_traps.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-i386/mach-default/mach_traps.h	2006-11-19 14:27:04.000000000 +0100
+ #undef D
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-default/mach_traps.h b/include/asm-i386/mach-default/mach_traps.h
+--- a/include/asm-i386/mach-default/mach_traps.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-i386/mach-default/mach_traps.h	2007-03-14 10:55:42.000000000 +0100
 @@ -15,6 +15,18 @@
  	outb(reason, 0x61);
  }
@@ -70776,9 +66679,9 @@
  static inline unsigned char get_nmi_reason(void)
  {
  	return inb(0x61);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/agp.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/agp.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/agp.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/agp.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/agp.h b/include/asm-i386/mach-xen/asm/agp.h
+--- a/include/asm-i386/mach-xen/asm/agp.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/agp.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,37 @@
 +#ifndef AGP_H
 +#define AGP_H 1
@@ -70817,29 +66720,26 @@
 +	dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
 +
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/desc.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/desc.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/desc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/desc.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,164 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/desc.h b/include/asm-i386/mach-xen/asm/desc.h
+--- a/include/asm-i386/mach-xen/asm/desc.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/desc.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,222 @@
 +#ifndef __ARCH_DESC_H
 +#define __ARCH_DESC_H
 +
 +#include <asm/ldt.h>
 +#include <asm/segment.h>
 +
-+#define CPU_16BIT_STACK_SIZE 1024
-+
 +#ifndef __ASSEMBLY__
 +
 +#include <linux/preempt.h>
 +#include <linux/smp.h>
++#include <linux/percpu.h>
 +
 +#include <asm/mmu.h>
 +
 +extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 +
-+DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-+
 +struct Xgt_desc_struct {
 +	unsigned short size;
 +	unsigned long address __attribute__((packed));
@@ -70855,52 +66755,109 @@
 +	return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
 +}
 +
++extern struct desc_struct idt_table[];
++extern void set_intr_gate(unsigned int irq, void * addr);
++
++static inline void pack_descriptor(__u32 *a, __u32 *b,
++	unsigned long base, unsigned long limit, unsigned char type, unsigned char flags)
++{
++	*a = ((base & 0xffff) << 16) | (limit & 0xffff);
++	*b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
++		(limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20);
++}
++
++static inline void pack_gate(__u32 *a, __u32 *b,
++	unsigned long base, unsigned short seg, unsigned char type, unsigned char flags)
++{
++	*a = (seg << 16) | (base & 0xffff);
++	*b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff);
++}
++
++#define DESCTYPE_LDT 	0x82	/* present, system, DPL-0, LDT */
++#define DESCTYPE_TSS 	0x89	/* present, system, DPL-0, 32-bit TSS */
++#define DESCTYPE_TASK	0x85	/* present, system, DPL-0, task gate */
++#define DESCTYPE_INT	0x8e	/* present, system, DPL-0, interrupt gate */
++#define DESCTYPE_TRAP	0x8f	/* present, system, DPL-0, trap gate */
++#define DESCTYPE_DPL3	0x60	/* DPL-3 */
++#define DESCTYPE_S	0x10	/* !system */
++
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#else
 +#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
-+#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
 +
 +#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
 +#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
-+#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
-+#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
++#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
++#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
 +
 +#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
 +#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
-+#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
-+#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
++#define store_tr(tr) __asm__ ("str %0":"=m" (tr))
++#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
 +
-+/*
-+ * This is the ldt that every process will get unless we need
-+ * something other than this.
-+ */
-+extern struct desc_struct default_ldt[];
-+extern void set_intr_gate(unsigned int irq, void * addr);
++#if TLS_SIZE != 24
++# error update this code.
++#endif
++
++static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
++{
++#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), *(u64 *)&t->tls_array[i])
++	C(0); C(1); C(2);
++#undef C
++}
 +
-+#define _set_tssldt_desc(n,addr,limit,type) \
-+__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
-+	"movw %w1,2(%2)\n\t" \
-+	"rorl $16,%1\n\t" \
-+	"movb %b1,4(%2)\n\t" \
-+	"movb %4,5(%2)\n\t" \
-+	"movb $0,6(%2)\n\t" \
-+	"movb %h1,7(%2)\n\t" \
-+	"rorl $16,%1" \
-+	: "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
++extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
++#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
++#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
 +
-+#ifndef CONFIG_X86_NO_TSS
-+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
++static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
 +{
-+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
-+		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
++	__u32 *lp = (__u32 *)((char *)dt + entry*8);
++	*lp = entry_a;
++	*(lp+1) = entry_b;
 +}
 +
-+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
-+#endif
++#define set_ldt native_set_ldt
++#endif /* CONFIG_PARAVIRT */
++
++static inline fastcall void native_set_ldt(const void *addr,
++					   unsigned int entries)
++{
++	if (likely(entries == 0))
++		__asm__ __volatile__("lldt %w0"::"q" (0));
++	else {
++		unsigned cpu = smp_processor_id();
++		__u32 a, b;
++
++		pack_descriptor(&a, &b, (unsigned long)addr,
++				entries * sizeof(struct desc_struct) - 1,
++				DESCTYPE_LDT, 0);
++		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
++		__asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
++	}
++}
 +
-+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
++static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
++{
++	__u32 a, b;
++	pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
++	write_idt_entry(idt_table, gate, a, b);
++}
++
++#ifndef CONFIG_X86_NO_TSS
++static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
 +{
-+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
++	__u32 a, b;
++	pack_descriptor(&a, &b, (unsigned long)addr,
++			offsetof(struct tss_struct, __cacheline_filler) - 1,
++			DESCTYPE_TSS, 0);
++	write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
 +}
 +
++#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
++#endif
++
 +#define LDT_entry_a(info) \
 +	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
 +
@@ -70926,19 +66883,6 @@
 +	(info)->seg_not_present	== 1	&& \
 +	(info)->useable		== 0	)
 +
-+extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
-+
-+#if TLS_SIZE != 24
-+# error update this code.
-+#endif
-+
-+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-+{
-+#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), *(u64 *)&t->tls_array[i])
-+	C(0); C(1); C(2);
-+#undef C
-+}
-+
 +static inline void clear_LDT(void)
 +{
 +	int cpu = get_cpu();
@@ -70955,22 +66899,16 @@
 +/*
 + * load one particular LDT into the current CPU
 + */
-+static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
++static inline void load_LDT_nolock(mm_context_t *pc)
 +{
-+	void *segments = pc->ldt;
-+	int count = pc->size;
-+
-+	if (likely(!count))
-+		segments = NULL;
-+
-+	xen_set_ldt((unsigned long)segments, count);
++	xen_set_ldt((unsigned long)pc->ldt, pc->size);
 +}
 +
 +static inline void load_LDT(mm_context_t *pc)
 +{
-+	int cpu = get_cpu();
-+	load_LDT_nolock(pc, cpu);
-+	put_cpu();
++	preempt_disable();
++	load_LDT_nolock(pc);
++	preempt_enable();
 +}
 +
 +static inline unsigned long get_desc_base(unsigned long *desc)
@@ -70982,25 +66920,45 @@
 +	return base;
 +}
 +
++#else /* __ASSEMBLY__ */
++
++/*
++ * GET_DESC_BASE reads the descriptor base of the specified segment.
++ *
++ * Args:
++ *    idx - descriptor index
++ *    gdt - GDT pointer
++ *    base - 32bit register to which the base will be written
++ *    lo_w - lo word of the "base" register
++ *    lo_b - lo byte of the "base" register
++ *    hi_b - hi byte of the low word of the "base" register
++ *
++ * Example:
++ *    GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
++ *    Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
++ */
++#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
++	movb idx*8+4(gdt), lo_b; \
++	movb idx*8+7(gdt), hi_b; \
++	shll $16, base; \
++	movw idx*8+2(gdt), lo_w;
++
 +#endif /* !__ASSEMBLY__ */
 +
 +#endif
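
Note on the new pack_descriptor()/pack_gate() helpers above: they
assemble the two 32-bit words of an i386 segment descriptor, whose base
and limit fields are split across both words by the hardware format. A
worked example, assuming the helpers and DESCTYPE_TSS from the hunk
above; the values are arbitrary:

    /* Worked example of the descriptor bit layout. */
    static void example_pack(void)
    {
            __u32 a, b;

            /* base 0x00100000, limit 0xfffff, 32-bit TSS, flags 0 */
            pack_descriptor(&a, &b, 0x00100000, 0xfffff, DESCTYPE_TSS, 0);
            /*
             * a = ((base & 0xffff) << 16) | (limit & 0xffff)     = 0x0000ffff
             * b = (base & 0xff000000) | ((base & 0xff0000) >> 16)
             *   | (limit & 0x000f0000) | ((type & 0xff) << 8)    = 0x000f8910
             */
    }
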
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/dma-mapping.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/dma-mapping.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/dma-mapping.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/dma-mapping.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/dma-mapping.h b/include/asm-i386/mach-xen/asm/dma-mapping.h
+--- a/include/asm-i386/mach-xen/asm/dma-mapping.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/dma-mapping.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,151 @@
 +#ifndef _ASM_I386_DMA_MAPPING_H
 +#define _ASM_I386_DMA_MAPPING_H
 +
-+/*
-+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
-+ * documentation.
-+ */
-+
 +#include <linux/mm.h>
++
 +#include <asm/cache.h>
 +#include <asm/io.h>
 +#include <asm/scatterlist.h>
++#include <asm/bug.h>
 +#include <asm/swiotlb.h>
 +
 +static inline int
@@ -71038,10 +66996,9 @@
 +dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 +		 enum dma_data_direction direction);
 +
-+extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-+		      int nents, enum dma_data_direction direction);
-+extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-+			 int nents, enum dma_data_direction direction);
++extern int
++dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
++	   enum dma_data_direction direction);
 +
 +extern dma_addr_t
 +dma_map_page(struct device *dev, struct page *page, unsigned long offset,
@@ -71052,6 +67009,10 @@
 +	       enum dma_data_direction direction);
 +
 +extern void
++dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
++	     enum dma_data_direction direction);
++
++extern void
 +dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
 +			enum dma_data_direction direction);
 +
@@ -71118,10 +67079,10 @@
 +	return (1 << INTERNODE_CACHE_SHIFT);
 +}
 +
-+#define dma_is_consistent(d)	(1)
++#define dma_is_consistent(d, h)	(1)
 +
 +static inline void
-+dma_cache_sync(void *vaddr, size_t size,
++dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 +	       enum dma_data_direction direction)
 +{
 +	flush_write_buffers();
@@ -71140,10 +67101,10 @@
 +				  dma_addr_t device_addr, size_t size);
 +
 +#endif
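
Note on the dma-mapping.h declarations above: these are the standard
streaming-DMA entry points; a typical caller maps a buffer around a
single transfer and unmaps it when the device is done. Rough sketch
only, assuming the single-argument dma_mapping_error() of kernels of
this vintage:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int example_rx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t bus = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

            if (dma_mapping_error(bus))
                    return -EIO;
            /* ... hand 'bus' to the hardware, wait for completion ... */
            dma_unmap_single(dev, bus, len, DMA_FROM_DEVICE);
            return 0;
    }
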
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/fixmap.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/fixmap.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/fixmap.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/fixmap.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,156 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/fixmap.h b/include/asm-i386/mach-xen/asm/fixmap.h
+--- a/include/asm-i386/mach-xen/asm/fixmap.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/fixmap.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,163 @@
 +/*
 + * fixmap.h: compile-time virtual memory allocation
 + *
@@ -71165,7 +67126,13 @@
 + * Leave one empty page between vmalloc'ed areas and
 + * the start of the fixmap.
 + */
++#ifndef CONFIG_COMPAT_VDSO
 +extern unsigned long __FIXADDR_TOP;
++#else
++#define __FIXADDR_TOP  0xfffff000
++#define FIXADDR_USER_START	__fix_to_virt(FIX_VDSO)
++#define FIXADDR_USER_END	__fix_to_virt(FIX_VDSO - 1)
++#endif
 +
 +#ifndef __ASSEMBLY__
 +#include <linux/kernel.h>
@@ -71245,6 +67212,7 @@
 +extern void __set_fixmap(enum fixed_addresses idx,
 +					maddr_t phys, pgprot_t flags);
 +
++extern void reserve_top_address(unsigned long reserve);
 +extern void set_fixaddr_top(unsigned long top);
 +
 +#define set_fixmap(idx, phys) \
@@ -71300,9 +67268,9 @@
 +
 +#endif /* !__ASSEMBLY__ */
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/floppy.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/floppy.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/floppy.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/floppy.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/floppy.h b/include/asm-i386/mach-xen/asm/floppy.h
+--- a/include/asm-i386/mach-xen/asm/floppy.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/floppy.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,147 @@
 +/*
 + * Architecture specific parts of the Floppy driver
@@ -71349,14 +67317,14 @@
 +static int virtual_dma_mode;
 +static int doing_pdma;
 +
-+static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
++static irqreturn_t floppy_hardint(int irq, void *dev_id)
 +{
 +	register unsigned char st;
 +	register int lcount;
 +	register char *lptr;
 +
 +	if (!doing_pdma)
-+		return floppy_interrupt(irq, dev_id, regs);
++		return floppy_interrupt(irq, dev_id);
 +
 +	st = 1;
 +	for(lcount=virtual_dma_count, lptr=virtual_dma_addr; 
@@ -71379,7 +67347,7 @@
 +		virtual_dma_residue += virtual_dma_count;
 +		virtual_dma_count=0;
 +		doing_pdma = 0;
-+		floppy_interrupt(irq, dev_id, regs);
++		floppy_interrupt(irq, dev_id);
 +		return IRQ_HANDLED;
 +	}
 +	return IRQ_HANDLED;
@@ -71451,169 +67419,9 @@
 +#define EXTRA_FLOPPY_PARAMS
 +
 +#endif /* __ASM_XEN_I386_FLOPPY_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/highmem.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/highmem.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/highmem.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/highmem.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,80 @@
-+/*
-+ * highmem.h: virtual kernel memory mappings for high memory
-+ *
-+ * Used in CONFIG_HIGHMEM systems for memory pages which
-+ * are not addressable by direct kernel virtual addresses.
-+ *
-+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
-+ *		      Gerhard.Wichert at pdb.siemens.de
-+ *
-+ *
-+ * Redesigned the x86 32-bit VM architecture to deal with 
-+ * up to 16 Terabyte physical memory. With current x86 CPUs
-+ * we now support up to 64 Gigabytes physical RAM.
-+ *
-+ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
-+ */
-+
-+#ifndef _ASM_HIGHMEM_H
-+#define _ASM_HIGHMEM_H
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/interrupt.h>
-+#include <linux/threads.h>
-+#include <asm/kmap_types.h>
-+#include <asm/tlbflush.h>
-+
-+/* declarations for highmem.c */
-+extern unsigned long highstart_pfn, highend_pfn;
-+
-+extern pte_t *kmap_pte;
-+extern pgprot_t kmap_prot;
-+extern pte_t *pkmap_page_table;
-+
-+/*
-+ * Right now we initialize only a single pte table. It can be extended
-+ * easily, subsequent pte tables have to be allocated in one physical
-+ * chunk of RAM.
-+ */
-+#ifdef CONFIG_X86_PAE
-+#define LAST_PKMAP 512
-+#else
-+#define LAST_PKMAP 1024
-+#endif
-+/*
-+ * Ordering is:
-+ *
-+ * FIXADDR_TOP
-+ * 			fixed_addresses
-+ * FIXADDR_START
-+ * 			temp fixed addresses
-+ * FIXADDR_BOOT_START
-+ * 			Persistent kmap area
-+ * PKMAP_BASE
-+ * VMALLOC_END
-+ * 			Vmalloc area
-+ * VMALLOC_START
-+ * high_memory
-+ */
-+#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
-+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-+#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-+#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-+
-+extern void * FASTCALL(kmap_high(struct page *page));
-+extern void FASTCALL(kunmap_high(struct page *page));
-+
-+void *kmap(struct page *page);
-+void kunmap(struct page *page);
-+void *kmap_atomic(struct page *page, enum km_type type);
-+void *kmap_atomic_pte(struct page *page, enum km_type type);
-+void kunmap_atomic(void *kvaddr, enum km_type type);
-+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-+struct page *kmap_atomic_to_page(void *ptr);
-+
-+#define flush_cache_kmaps()	do { } while (0)
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /* _ASM_HIGHMEM_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/hw_irq.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/hw_irq.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/hw_irq.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/hw_irq.h	2006-12-05 18:42:37.000000000 +0100
-@@ -0,0 +1,72 @@
-+#ifndef _ASM_HW_IRQ_H
-+#define _ASM_HW_IRQ_H
-+
-+/*
-+ *	linux/include/asm/hw_irq.h
-+ *
-+ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
-+ *
-+ *	moved some of the old arch/i386/kernel/irq.h to here. VY
-+ *
-+ *	IRQ/IPI changes taken from work by Thomas Radke
-+ *	<tomsoft at informatik.tu-chemnitz.de>
-+ */
-+
-+#include <linux/profile.h>
-+#include <asm/atomic.h>
-+#include <asm/irq.h>
-+#include <asm/sections.h>
-+
-+struct hw_interrupt_type;
-+
-+#define NMI_VECTOR		0x02
-+
-+/*
-+ * Various low-level irq details needed by irq.c, process.c,
-+ * time.c, io_apic.c and smp.c
-+ *
-+ * Interrupt entry/exit code at both C and assembly level
-+ */
-+
-+extern u8 irq_vector[NR_IRQ_VECTORS];
-+#define IO_APIC_VECTOR(irq)	(irq_vector[irq])
-+#define AUTO_ASSIGN		-1
-+
-+extern void (*interrupt[NR_IRQS])(void);
-+
-+#ifdef CONFIG_SMP
-+fastcall void reschedule_interrupt(void);
-+fastcall void invalidate_interrupt(void);
-+fastcall void call_function_interrupt(void);
-+#endif
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+fastcall void apic_timer_interrupt(void);
-+fastcall void error_interrupt(void);
-+fastcall void spurious_interrupt(void);
-+fastcall void thermal_interrupt(struct pt_regs *);
-+#define platform_legacy_irq(irq)	((irq) < 16)
-+#endif
-+
-+void disable_8259A_irq(unsigned int irq);
-+void enable_8259A_irq(unsigned int irq);
-+int i8259A_irq_pending(unsigned int irq);
-+void make_8259A_irq(unsigned int irq);
-+void init_8259A(int aeoi);
-+void FASTCALL(send_IPI_self(int vector));
-+void init_VISWS_APIC_irqs(void);
-+void setup_IO_APIC(void);
-+void disable_IO_APIC(void);
-+void print_IO_APIC(void);
-+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
-+void send_IPI(int dest, int vector);
-+void setup_ioapic_dest(void);
-+
-+extern unsigned long io_apic_irqs;
-+
-+extern atomic_t irq_err_count;
-+extern atomic_t irq_mis_count;
-+
-+#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
-+
-+#endif /* _ASM_HW_IRQ_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/hypercall.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/hypercall.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/hypercall.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/hypercall.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/hypercall.h b/include/asm-i386/mach-xen/asm/hypercall.h
+--- a/include/asm-i386/mach-xen/asm/hypercall.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/hypercall.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,389 @@
 +/******************************************************************************
 + * hypercall.h
@@ -72004,9 +67812,9 @@
 +
 +
 +#endif /* __HYPERCALL_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/hypervisor.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/hypervisor.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/hypervisor.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/hypervisor.h b/include/asm-i386/mach-xen/asm/hypervisor.h
+--- a/include/asm-i386/mach-xen/asm/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/hypervisor.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,234 @@
 +/******************************************************************************
 + * hypervisor.h
@@ -72242,10 +68050,10 @@
 +}
 +
 +#endif /* __HYPERVISOR_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/io.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/io.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/io.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/io.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,389 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/io.h b/include/asm-i386/mach-xen/asm/io.h
+--- a/include/asm-i386/mach-xen/asm/io.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/io.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,368 @@
 +#ifndef _ASM_IO_H
 +#define _ASM_IO_H
 +
@@ -72401,7 +68209,11 @@
 + */
 +#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
 +#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
-+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
++
++static inline void *isa_bus_to_virt(unsigned long address)
++{
++	return (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + address);
++}
 +
 +/*
 + * However PCI ones are not necessarily 1:1 and therefore these interfaces
@@ -72485,33 +68297,6 @@
 +
 +#define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),(void __force *)(b),(c),(d))
 +
-+/**
-+ *	check_signature		-	find BIOS signatures
-+ *	@io_addr: mmio address to check 
-+ *	@signature:  signature block
-+ *	@length: length of signature
-+ *
-+ *	Perform a signature comparison with the mmio address io_addr. This
-+ *	address should have been obtained by ioremap.
-+ *	Returns 1 on a match.
-+ */
-+ 
-+static inline int check_signature(volatile void __iomem * io_addr,
-+	const unsigned char *signature, int length)
-+{
-+	int retval = 0;
-+	do {
-+		if (readb(io_addr) != *signature)
-+			goto out;
-+		io_addr++;
-+		signature++;
-+		length--;
-+	} while (length);
-+	retval = 1;
-+out:
-+	return retval;
-+}
-+
 +/*
 + *	Cache management
 + *
@@ -72544,11 +68329,11 @@
 +
 +#endif /* __KERNEL__ */
 +
-+#ifdef SLOW_IO_BY_JUMPING
-+#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
++#if defined(CONFIG_PARAVIRT)
++#include <asm/paravirt.h>
 +#else
++
 +#define __SLOW_DOWN_IO "outb %%al,$0x80;"
-+#endif
 +
 +static inline void slow_down_io(void) {
 +	__asm__ __volatile__(
@@ -72559,6 +68344,8 @@
 +		: : );
 +}
 +
++#endif
++
 +#ifdef CONFIG_X86_NUMAQ
 +extern void *xquad_portio;    /* Where the IO area was mapped */
 +#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
@@ -72635,10 +68422,10 @@
 +#define ARCH_HAS_DEV_MEM
 +
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/irqflags.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/irqflags.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/irqflags.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/irqflags.h	2006-12-05 18:42:37.000000000 +0100
-@@ -0,0 +1,80 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/irqflags.h b/include/asm-i386/mach-xen/asm/irqflags.h
+--- a/include/asm-i386/mach-xen/asm/irqflags.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/irqflags.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,96 @@
 +/*
 + * include/asm-i386/irqflags.h
 + *
@@ -72651,12 +68438,13 @@
 +#ifndef _ASM_IRQFLAGS_H
 +#define _ASM_IRQFLAGS_H
 +
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#else
 +#ifndef __ASSEMBLY__
 +
-+#define raw_local_save_flags(flags) \
-+		do { (flags) = __raw_local_save_flags(); } while (0)
-+
 +unsigned long __raw_local_save_flags(void);
++
 +void raw_local_irq_restore(unsigned long flags);
 +void raw_local_irq_disable(void);
 +void raw_local_irq_enable(void);
@@ -72673,20 +68461,35 @@
 + */
 +void halt(void);
 +
-+static inline int raw_irqs_disabled_flags(unsigned long flags)
-+{
-+	return flags != 0;
-+}
-+
-+int raw_irqs_disabled(void);
-+
 +/*
 + * For spinlocks, etc:
 + */
 +unsigned long __raw_local_irq_save(void);
++
++#else
++#define DISABLE_INTERRUPTS(clobbers)	GET_VCPU_INFO				; \
++					__DISABLE_INTERRUPTS
++#define ENABLE_INTERRUPTS(clobbers)	GET_VCPU_INFO				; \
++					__ENABLE_INTERRUPTS
++#define ENABLE_INTERRUPTS_SYSEXIT	NOT_DONE_YET
++#define INTERRUPT_RETURN		iret
++#define GET_CR0_INTO_EAX		NOT_DONE_YET
++#endif /* __ASSEMBLY__ */
++#endif /* CONFIG_PARAVIRT */
++
++#ifndef __ASSEMBLY__
++#define raw_local_save_flags(flags) \
++		do { (flags) = __raw_local_save_flags(); } while (0)
++
 +#define raw_local_irq_save(flags) \
 +		do { (flags) = __raw_local_irq_save(); } while (0)
 +
++static inline int raw_irqs_disabled_flags(unsigned long flags)
++{
++	return flags != 0;
++}
++
++int raw_irqs_disabled(void);
 +#endif /* __ASSEMBLY__ */
 +
 +/*
@@ -72719,44 +68522,9 @@
 +#endif
 +
 +#endif
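
The reordering above pulls raw_local_save_flags()/raw_local_irq_save() and
raw_irqs_disabled_flags() out of the CONFIG_PARAVIRT conditional, so the same
C interface is visible whether the direct Xen implementations or paravirt-ops
are in use. Callers keep the usual pattern; a minimal sketch (standard kernel
idiom, not code from this patch):

    #include <linux/irqflags.h>

    static void example_critical_section(void)
    {
            unsigned long flags;

            raw_local_irq_save(flags);      /* record state, then disable */
            /* ... touch data shared with an interrupt handler ... */
            raw_local_irq_restore(flags);   /* re-enable only if enabled before */
    }

Most code should go through the local_irq_save()/local_irq_restore() wrappers;
the raw_* forms are the arch-level primitives this header supplies.
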
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/kmap_types.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/kmap_types.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/kmap_types.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/kmap_types.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,31 @@
-+#ifndef _ASM_KMAP_TYPES_H
-+#define _ASM_KMAP_TYPES_H
-+
-+
-+#ifdef CONFIG_DEBUG_HIGHMEM
-+# define D(n) __KM_FENCE_##n ,
-+#else
-+# define D(n)
-+#endif
-+
-+enum km_type {
-+D(0)	KM_BOUNCE_READ,
-+D(1)	KM_SKB_SUNRPC_DATA,
-+D(2)	KM_SKB_DATA_SOFTIRQ,
-+D(3)	KM_USER0,
-+D(4)	KM_USER1,
-+D(5)	KM_BIO_SRC_IRQ,
-+D(6)	KM_BIO_DST_IRQ,
-+D(7)	KM_PTE0,
-+D(8)	KM_PTE1,
-+D(9)	KM_IRQ0,
-+D(10)	KM_IRQ1,
-+D(11)	KM_SOFTIRQ0,
-+D(12)	KM_SOFTIRQ1,
-+D(13)	KM_SWIOTLB,
-+D(14)	KM_TYPE_NR
-+};
-+
-+#undef D
-+
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/maddr.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/maddr.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/maddr.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/maddr.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/maddr.h b/include/asm-i386/mach-xen/asm/maddr.h
+--- a/include/asm-i386/mach-xen/asm/maddr.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/maddr.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,160 @@
 +#ifndef _I386_MADDR_H
 +#define _I386_MADDR_H
@@ -72918,10 +68686,10 @@
 +#define __pte_ma(x)	((pte_t) { (x) } )
 +
 +#endif /* _I386_MADDR_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/mmu_context.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/mmu_context.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/mmu_context.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/mmu_context.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,108 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/mmu_context.h b/include/asm-i386/mach-xen/asm/mmu_context.h
+--- a/include/asm-i386/mach-xen/asm/mmu_context.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/mmu_context.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,107 @@
 +#ifndef __I386_SCHED_H
 +#define __I386_SCHED_H
 +
@@ -72955,10 +68723,9 @@
 +	 * are always kernel segments while inside the kernel. Must
 +	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
 +	 */
-+	asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
-+		: "=m" (current->thread.fs),
-+		  "=m" (current->thread.gs));
-+	asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
++	asm volatile ( "mov %%fs,%0"
++		: "=m" (current->thread.fs));
++	asm volatile ( "movl %0,%%fs"
 +		: : "r" (0) );
 +}
 +
@@ -72994,7 +68761,7 @@
 +		 * load the LDT, if the LDT is different:
 +		 */
 +		if (unlikely(prev->context.ldt != next->context.ldt)) {
-+			/* load_LDT_nolock(&next->context, cpu) */
++			/* load_LDT_nolock(&next->context) */
 +			op->cmd = MMUEXT_SET_LDT;
 +			op->arg1.linear_addr = (unsigned long)next->context.ldt;
 +			op->arg2.nr_ents     = next->context.size;
@@ -73013,14 +68780,14 @@
 +			 * tlb flush IPI delivery. We must reload %cr3.
 +			 */
 +			load_cr3(next->pgd);
-+			load_LDT_nolock(&next->context, cpu);
++			load_LDT_nolock(&next->context);
 +		}
 +	}
 +#endif
 +}
 +
-+#define deactivate_mm(tsk, mm) \
-+	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
++#define deactivate_mm(tsk, mm)			\
++	asm("movl %0,%%fs": :"r" (0));
 +
 +static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 +{
@@ -73030,43 +68797,10 @@
 +}
 +
 +#endif
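
The interesting property of switch_mm() above is that it queues the
MMUEXT_NEW_BASEPTR and MMUEXT_SET_LDT operations in one array and issues a
single mmuext hypercall, instead of paying one ring transition per operation.
Stripped of the surrounding context-switch logic, the pattern is roughly the
following (virt_to_mfn() and the include paths are assumptions taken from
elsewhere in this patch, not visible in these hunks):

    #include <linux/sched.h>          /* struct mm_struct           */
    #include <xen/interface/xen.h>    /* struct mmuext_op, MMUEXT_* */
    #include <asm/hypercall.h>        /* HYPERVISOR_mmuext_op()     */

    /* Illustrative two-op batch, as switch_mm() builds it. */
    static void example_switch_pgd_and_ldt(struct mm_struct *next)
    {
            struct mmuext_op ops[2];

            ops[0].cmd = MMUEXT_NEW_BASEPTR;               /* new cr3 */
            ops[0].arg1.mfn = virt_to_mfn(next->pgd);
            ops[1].cmd = MMUEXT_SET_LDT;                   /* new LDT */
            ops[1].arg1.linear_addr = (unsigned long)next->context.ldt;
            ops[1].arg2.nr_ents = next->context.size;
            BUG_ON(HYPERVISOR_mmuext_op(ops, 2, NULL, DOMID_SELF) < 0);
    }

The dropped %gs handling in the same hunk follows 2.6.20's use of %gs for the
per-CPU PDA: that segment is no longer per-thread state the context switch has
to save and clear.
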
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/mmu.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/mmu.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/mmu.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/mmu.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,29 @@
-+#ifndef __i386_MMU_H
-+#define __i386_MMU_H
-+
-+#include <asm/semaphore.h>
-+/*
-+ * The i386 doesn't have a mmu context, but
-+ * we put the segment information here.
-+ *
-+ * cpu_vm_mask is used to optimize ldt flushing.
-+ */
-+typedef struct { 
-+	int size;
-+	struct semaphore sem;
-+	void *ldt;
-+	void *vdso;
-+#ifdef CONFIG_XEN
-+	int has_foreign_mappings;
-+#endif
-+} mm_context_t;
-+
-+/* mm/memory.c:exit_mmap hook */
-+extern void _arch_exit_mmap(struct mm_struct *mm);
-+#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
-+
-+/* kernel/fork.c:dup_mmap hook */
-+extern void _arch_dup_mmap(struct mm_struct *mm);
-+#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
-+
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/page.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/page.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/page.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/page.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,220 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/page.h b/include/asm-i386/mach-xen/asm/page.h
+--- a/include/asm-i386/mach-xen/asm/page.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/page.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,224 @@
 +#ifndef _I386_PAGE_H
 +#define _I386_PAGE_H
 +
@@ -73176,6 +68910,7 @@
 +	return (unsigned long long)x.pte_high << 32 | x.pte_low;
 +}
 +#define HPAGE_SHIFT	21
++#include <asm-generic/pgtable-nopud.h>
 +#else
 +typedef struct { unsigned long pte_low; } pte_t;
 +typedef struct { unsigned long pgd; } pgd_t;
@@ -73197,6 +68932,7 @@
 +	return ret;
 +}
 +#define HPAGE_SHIFT	22
++#include <asm-generic/pgtable-nopmd.h>
 +#endif
 +#define PTE_MASK	PAGE_MASK
 +
@@ -73245,12 +68981,9 @@
 +
 +#ifdef __ASSEMBLY__
 +#define __PAGE_OFFSET		CONFIG_PAGE_OFFSET
-+#define __PHYSICAL_START	CONFIG_PHYSICAL_START
 +#else
 +#define __PAGE_OFFSET		((unsigned long)CONFIG_PAGE_OFFSET)
-+#define __PHYSICAL_START	((unsigned long)CONFIG_PHYSICAL_START)
 +#endif
-+#define __KERNEL_START		(__PAGE_OFFSET + __PHYSICAL_START)
 +
 +#ifdef CONFIG_XEN_COMPAT_030002
 +#undef LOAD_OFFSET
@@ -73261,6 +68994,9 @@
 +#define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
 +#define MAXMEM			(__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE)
 +#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
++/* __pa_symbol should be used for C visible symbols.
++   This seems to be the official gcc blessed way to do such arithmetic. */
++#define __pa_symbol(x)          __pa(RELOC_HIDE((unsigned long)(x),0))
 +#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
 +#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 +#ifdef CONFIG_FLATMEM
@@ -73283,197 +69019,15 @@
 +#include <asm-generic/memory_model.h>
 +#include <asm-generic/page.h>
 +
++#ifndef CONFIG_COMPAT_VDSO
 +#define __HAVE_ARCH_GATE_AREA 1
-+#endif /* __KERNEL__ */
-+
-+#endif /* _I386_PAGE_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/param.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/param.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/param.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/param.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,23 @@
-+#ifndef _ASMi386_PARAM_H
-+#define _ASMi386_PARAM_H
-+
-+#ifdef __KERNEL__
-+# define HZ		CONFIG_HZ	/* Internal kernel timer frequency */
-+# define USER_HZ	100		/* .. some user interfaces are in "ticks" */
-+# define CLOCKS_PER_SEC		(USER_HZ)	/* like times() */
-+#endif
-+
-+#ifndef HZ
-+#define HZ 100
-+#endif
-+
-+#define EXEC_PAGESIZE	4096
-+
-+#ifndef NOGROUP
-+#define NOGROUP		(-1)
-+#endif
-+
-+#define MAXHOSTNAMELEN	64	/* max length of hostname */
-+#define COMMAND_LINE_SIZE 256
-+
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/pci.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pci.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/pci.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pci.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,153 @@
-+#ifndef __i386_PCI_H
-+#define __i386_PCI_H
-+
-+
-+#ifdef __KERNEL__
-+#include <linux/mm.h>		/* for struct page */
-+
-+/* Can be used to override the logic in pci_scan_bus for skipping
-+   already-configured bus numbers - to be used for buggy BIOSes
-+   or architectures with incomplete PCI setup by the loader */
-+
-+#ifdef CONFIG_PCI
-+extern unsigned int pcibios_assign_all_busses(void);
-+#else
-+#define pcibios_assign_all_busses()	0
-+#endif
-+#define pcibios_scan_all_fns(a, b)	0
-+
-+extern unsigned long pci_mem_start;
-+#define PCIBIOS_MIN_IO		0x1000
-+#define PCIBIOS_MIN_MEM		(pci_mem_start)
-+
-+#define PCIBIOS_MIN_CARDBUS_IO	0x4000
-+
-+void pcibios_config_init(void);
-+struct pci_bus * pcibios_scan_root(int bus);
-+
-+void pcibios_set_master(struct pci_dev *dev);
-+void pcibios_penalize_isa_irq(int irq, int active);
-+struct irq_routing_table *pcibios_get_irq_routing_table(void);
-+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-+
-+/* Dynamic DMA mapping stuff.
-+ * i386 has everything mapped statically.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/slab.h>
-+#include <asm/scatterlist.h>
-+#include <linux/string.h>
-+#include <asm/io.h>
-+
-+struct pci_dev;
-+
-+#ifdef CONFIG_SWIOTLB
-+
-+
-+/* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
-+#define PCI_DMA_BUS_IS_PHYS	(0)
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
-+	dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
-+	__u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME)			\
-+	((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
-+	(((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME)			\
-+	((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
-+	(((PTR)->LEN_NAME) = (VAL))
-+
-+#else
-+
-+/* The PCI address space does equal the physical memory
-+ * address space.  The networking and block device layers use
-+ * this boolean for bounce buffer decisions.
-+ */
-+#define PCI_DMA_BUS_IS_PHYS	(1)
-+
-+/* pci_unmap_{page,single} is a nop so... */
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-+#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
-+#define pci_unmap_len(PTR, LEN_NAME)		(0)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
-+
-+#endif
-+
-+/* This is always fine. */
-+#define pci_dac_dma_supported(pci_dev, mask)	(1)
-+
-+static inline dma64_addr_t
-+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-+{
-+	return ((dma64_addr_t) page_to_phys(page) +
-+		(dma64_addr_t) offset);
-+}
-+
-+static inline struct page *
-+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+	return pfn_to_page(dma_addr >> PAGE_SHIFT);
-+}
-+
-+static inline unsigned long
-+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+	return (dma_addr & ~PAGE_MASK);
-+}
-+
-+static inline void
-+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+}
-+
-+static inline void
-+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+	flush_write_buffers();
-+}
-+
-+#define HAVE_PCI_MMAP
-+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-+			       enum pci_mmap_state mmap_state, int write_combine);
-+
-+
-+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-+{
-+}
-+
-+#ifdef CONFIG_PCI
-+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-+					enum pci_dma_burst_strategy *strat,
-+					unsigned long *strategy_parameter)
-+{
-+	*strat = PCI_DMA_BURST_INFINITY;
-+	*strategy_parameter = ~0UL;
-+}
 +#endif
-+
 +#endif /* __KERNEL__ */
 +
-+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
-+#include <xen/pcifront.h>
-+#endif /* CONFIG_XEN_PCIDEV_FRONTEND */
-+
-+/* implement the pci_ DMA API in terms of the generic device dma_ one */
-+#include <asm-generic/pci-dma-compat.h>
-+
-+/* generic pci stuff */
-+#include <asm-generic/pci.h>
-+
-+/* On Xen we have to scan all functions since Xen hides bridges from
-+ * us.  If a bridge is at fn=0 and that slot has a multifunction
-+ * device, we won't find the additional devices without scanning all
-+ * functions. */
-+#undef pcibios_scan_all_fns
-+#define pcibios_scan_all_fns(a, b)	1
-+
-+#endif /* __i386_PCI_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/pgalloc.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pgalloc.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/pgalloc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pgalloc.h	2006-11-19 14:27:04.000000000 +0100
++#endif /* _I386_PAGE_H */
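
The page.h changes fold in 2.6.20's __pa_symbol(): __pa() remains the
conversion for run-time virtual addresses, while __pa_symbol() wraps its
operand in RELOC_HIDE() so gcc cannot apply symbol-arithmetic assumptions when
the operand is a C-visible symbol. A hedged sketch of the distinction (_text
comes from the i386 linker script; the function name is illustrative):

    extern char _text[];    /* start of kernel text, from vmlinux.lds */

    static unsigned long example_kernel_text_phys(void)
    {
            /* __pa_symbol() for linker symbols, __pa() for pointers. */
            return __pa_symbol(_text);
    }

The asm-generic/pgtable-nopud.h and -nopmd.h includes added in the same file
pick up the folded page-table levels that 2.6.20 expects each sub-arch to
declare in page.h rather than in pgtable-*level.h.
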
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/pgalloc.h b/include/asm-i386/mach-xen/asm/pgalloc.h
+--- a/include/asm-i386/mach-xen/asm/pgalloc.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/pgalloc.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,63 @@
 +#ifndef _I386_PGALLOC_H
 +#define _I386_PGALLOC_H
@@ -73538,40 +69092,13 @@
 +#define check_pgt_cache()	do { } while (0)
 +
 +#endif /* _I386_PGALLOC_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,21 @@
-+#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
-+#define _I386_PGTABLE_2LEVEL_DEFS_H
-+
-+#define HAVE_SHARED_KERNEL_PMD 0
-+
-+/*
-+ * traditional i386 two-level paging structure:
-+ */
-+
-+#define PGDIR_SHIFT	22
-+#define PTRS_PER_PGD	1024
-+#define PTRS_PER_PGD_NO_HV	(HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
-+
-+/*
-+ * the i386 is two-level, so we don't really have any
-+ * PMD directory physically.
-+ */
-+
-+#define PTRS_PER_PTE	1024
-+
-+#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/pgtable-2level.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pgtable-2level.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/pgtable-2level.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pgtable-2level.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,87 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/pgtable-2level.h b/include/asm-i386/mach-xen/asm/pgtable-2level.h
+--- a/include/asm-i386/mach-xen/asm/pgtable-2level.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/pgtable-2level.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,77 @@
 +#ifndef _I386_PGTABLE_2LEVEL_H
 +#define _I386_PGTABLE_2LEVEL_H
 +
-+#include <asm-generic/pgtable-nopmd.h>
-+
 +#define pte_ERROR(e) \
 +	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
 +#define pgd_ERROR(e) \
@@ -73582,6 +69109,7 @@
 + * within a page table are directly modified.  Thus, the following
 + * hook is made available.
 + */
++#ifndef CONFIG_PARAVIRT
 +#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
 +
 +#define set_pte_at(_mm,addr,ptep,pteval) do {				\
@@ -73589,30 +69117,21 @@
 +	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
 +		set_pte((ptep), (pteval));				\
 +} while (0)
-+
-+#define set_pte_at_sync(_mm,addr,ptep,pteval) do {			\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
-+		set_pte((ptep), (pteval));				\
-+		xen_invlpg((addr));					\
-+	}								\
-+} while (0)
++#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
++#endif
 +
 +#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
-+
-+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
++#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
 +
 +#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 +#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 +
-+#define ptep_get_and_clear(mm,addr,xp)	__pte_ma(xchg(&(xp)->pte_low, 0))
-+#define pte_same(a, b)		((a).pte_low == (b).pte_low)
-+#define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
-+#define pte_pfn(_pte) mfn_to_local_pfn(pte_mfn(_pte))
-+
-+#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
++#define raw_ptep_get_and_clear(mm,addr,xp)	__pte_ma(xchg(&(xp)->pte_low, 0))
 +
++#define pte_mfn(_pte) 		((_pte).pte_low >> PAGE_SHIFT)
++#define pte_page(x)		pfn_to_page(pte_pfn(x))
 +#define pte_none(x)		(!(x).pte_low)
++#define pte_pfn(x) 		mfn_to_local_pfn(pte_mfn(x))
 +#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 +#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 +
@@ -73654,44 +69173,13 @@
 +void vmalloc_sync_all(void);
 +
 +#endif /* _I386_PGTABLE_2LEVEL_H */
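
raw_ptep_get_and_clear() above keeps the xchg(): the CPU sets the
accessed/dirty bits in a PTE asynchronously, so a plain read-then-store could
lose an update that lands between the two. The atomic swap closes that window;
for contrast (deliberately broken, illustrative only):

    /* BROKEN (illustrative): hardware may set _PAGE_DIRTY between the
     * read and the store, and that bit would be lost.                 */
    static pte_t broken_get_and_clear(pte_t *ptep)
    {
            pte_t old = *ptep;
            ptep->pte_low = 0;
            return old;
    }

    /* The patch swaps the word atomically instead:
     *         __pte_ma(xchg(&ptep->pte_low, 0))
     */

The raw_ prefix marks this as the arch half; the generic ptep_get_and_clear()
in pgtable.h now wraps it together with the pte_update() notification hook.
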
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,25 @@
-+#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
-+#define _I386_PGTABLE_3LEVEL_DEFS_H
-+
-+#define HAVE_SHARED_KERNEL_PMD 0
-+
-+/*
-+ * PGDIR_SHIFT determines what a top-level page table entry can map
-+ */
-+#define PGDIR_SHIFT	30
-+#define PTRS_PER_PGD	4
-+#define PTRS_PER_PGD_NO_HV 4
-+
-+/*
-+ * PMD_SHIFT determines the size of the area a middle-level
-+ * page table can map
-+ */
-+#define PMD_SHIFT	21
-+#define PTRS_PER_PMD	512
-+
-+/*
-+ * entries per page directory level
-+ */
-+#define PTRS_PER_PTE	512
-+
-+#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/pgtable-3level.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pgtable-3level.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/pgtable-3level.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pgtable-3level.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,185 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/pgtable-3level.h b/include/asm-i386/mach-xen/asm/pgtable-3level.h
+--- a/include/asm-i386/mach-xen/asm/pgtable-3level.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/pgtable-3level.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,194 @@
 +#ifndef _I386_PGTABLE_3LEVEL_H
 +#define _I386_PGTABLE_3LEVEL_H
 +
-+#include <asm-generic/pgtable-nopud.h>
-+
 +/*
 + * Intel Physical Address Extension (PAE) Mode - three-level page
 + * tables on PPro+ CPUs.
@@ -73733,6 +69221,7 @@
 +	return pte_x(pte);
 +}
 +
++#ifndef CONFIG_PARAVIRT
 +/* Rules for using set_pte: the pte being assigned *must* be
 + * either not present or in a state where the hardware will
 + * not attempt to update the pte.  In places where this is
@@ -73751,11 +69240,27 @@
 +}
 +# define set_pte_atomic(pteptr,pteval) \
 +		set_64bit((unsigned long long *)(pteptr),pte_val_ma(pteval))
++
++/*
++ * Since this is only called on user PTEs, and the page fault handler
++ * must handle the already racy situation of simultaneous page faults,
++ * we are justified in merely clearing the PTE present bit, followed
++ * by a set.  The ordering here is important.
++ */
++static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
++{
++	ptep->pte_low = 0;
++	smp_wmb();
++	ptep->pte_high = pte.pte_high;
++	smp_wmb();
++	ptep->pte_low = pte.pte_low;
++}
 +#else
 +/* no writable pagetables */
 +# define set_pte(pteptr,pteval)				\
 +		xen_l1_entry_update((pteptr), (pteval))
 +# define set_pte_atomic(pteptr,pteval) set_pte(pteptr,pteval)
++# define set_pte_present(mm,addr,ptep,pte) set_pte_at(mm,addr,ptep,pte)
 +#endif
 +
 +#define set_pte_at(_mm,addr,ptep,pteval) do {				\
@@ -73764,39 +69269,12 @@
 +		set_pte((ptep), (pteval));				\
 +} while (0)
 +
-+#define set_pte_at_sync(_mm,addr,ptep,pteval) do {			\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
-+		set_pte((ptep), (pteval));				\
-+		xen_invlpg((addr));					\
-+	}								\
-+} while (0)
-+
 +#define set_pmd(pmdptr,pmdval)				\
 +		xen_l2_entry_update((pmdptr), (pmdval))
 +#define set_pud(pudptr,pudval) \
 +		xen_l3_entry_update((pudptr), (pudval))
 +
 +/*
-+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
-+ * the TLB via cr3 if the top-level pgd is changed...
-+ * We do not let the generic code free and clear pgd entries due to
-+ * this erratum.
-+ */
-+static inline void pud_clear (pud_t * pud) { }
-+
-+#define pud_page(pud) \
-+((struct page *) __va(pud_val(pud) & PAGE_MASK))
-+
-+#define pud_page_kernel(pud) \
-+((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-+
-+
-+/* Find an entry in the second-level page table.. */
-+#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
-+			pmd_index(address))
-+
-+/*
 + * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 + * entry, so clear the bottom half first and enforce ordering with a compiler
 + * barrier.
@@ -73810,7 +69288,7 @@
 +
 +#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 +
-+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++static inline pte_t raw_ptep_get_and_clear(pte_t *ptep)
 +{
 +	pte_t res;
 +
@@ -73821,7 +69299,28 @@
 +
 +	return res;
 +}
++#endif
++
++/*
++ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
++ * the TLB via cr3 if the top-level pgd is changed...
++ * We do not let the generic code free and clear pgd entries due to
++ * this erratum.
++ */
++static inline void pud_clear (pud_t * pud) { }
++
++#define pud_page(pud) \
++((struct page *) __va(pud_val(pud) & PAGE_MASK))
 +
++#define pud_page_vaddr(pud) \
++((unsigned long) __va(pud_val(pud) & PAGE_MASK))
++
++
++/* Find an entry in the second-level page table.. */
++#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
++			pmd_index(address))
++
++#define __HAVE_ARCH_PTE_SAME
 +static inline int pte_same(pte_t a, pte_t b)
 +{
 +	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
@@ -73872,10 +69371,10 @@
 +#define vmalloc_sync_all() ((void)0)
 +
 +#endif /* _I386_PGTABLE_3LEVEL_H */
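
The new set_pte_present() above is where the PAE ordering rules earn their
keep: a PAE PTE is two 32-bit words, and another CPU can fault on the address
mid-update. Illustrative timeline of why low-word-first matters (comment form,
not code from the patch):

    /*
     *  CPU0 (set_pte_present)          CPU1 (fault on the same address)
     *  ptep->pte_low = 0;              sees !present -> treats it as a
     *  smp_wmb();                      racing update and retries
     *  ptep->pte_high = new.pte_high;  can never observe old.pte_low
     *  smp_wmb();                      paired with new.pte_high
     *  ptep->pte_low = new.pte_low;    final store publishes a valid entry
     */

The same discipline is behind pte_clear() in this file clearing pte_low before
pte_high, as the comment on the P-bit ordering already notes.
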
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/pgtable.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pgtable.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/pgtable.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/pgtable.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,508 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/pgtable.h b/include/asm-i386/mach-xen/asm/pgtable.h
+--- a/include/asm-i386/mach-xen/asm/pgtable.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/pgtable.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,561 @@
 +#ifndef _I386_PGTABLE_H
 +#define _I386_PGTABLE_H
 +
@@ -73894,6 +69393,7 @@
 +#include <asm/processor.h>
 +#include <asm/fixmap.h>
 +#include <linux/threads.h>
++#include <asm/paravirt.h>
 +
 +#ifndef _I386_BITOPS_H
 +#include <asm/bitops.h>
@@ -73913,14 +69413,14 @@
 +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 +extern unsigned long empty_zero_page[1024];
 +extern pgd_t *swapper_pg_dir;
-+extern kmem_cache_t *pgd_cache;
-+extern kmem_cache_t *pmd_cache;
++extern struct kmem_cache *pgd_cache;
++extern struct kmem_cache *pmd_cache;
 +extern spinlock_t pgd_lock;
 +extern struct page *pgd_list;
 +
-+void pmd_ctor(void *, kmem_cache_t *, unsigned long);
-+void pgd_ctor(void *, kmem_cache_t *, unsigned long);
-+void pgd_dtor(void *, kmem_cache_t *, unsigned long);
++void pmd_ctor(void *, struct kmem_cache *, unsigned long);
++void pgd_ctor(void *, struct kmem_cache *, unsigned long);
++void pgd_dtor(void *, struct kmem_cache *, unsigned long);
 +void pgtable_cache_init(void);
 +void paging_init(void);
 +
@@ -74127,20 +69627,88 @@
 +# include <asm/pgtable-2level.h>
 +#endif
 +
-+static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-+{
-+	if (!pte_dirty(*ptep))
-+		return 0;
-+	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
-+}
++#ifndef CONFIG_PARAVIRT
++/*
++ * Rules for using pte_update - it must be called after any PTE update which
++ * has not been done using the set_pte / clear_pte interfaces.  It is used by
++ * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
++ * updates should either be sets, clears, or set_pte_atomic for P->P
++ * transitions, which means this hook should only be called for user PTEs.
++ * This hook implies a P->P protection or access change has taken place, which
++ * requires a subsequent TLB flush.  The notification can optionally be delayed
++ * until the TLB flush event by using the pte_update_defer form of the
++ * interface, but care must be taken to assure that the flush happens while
++ * still holding the same page table lock so that the shadow and primary pages
++ * do not become out of sync on SMP.
++ */
++#define pte_update(mm, addr, ptep)		do { } while (0)
++#define pte_update_defer(mm, addr, ptep)	do { } while (0)
++#endif
 +
-+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
++/*
++ * We only update the dirty/accessed state if we set
++ * the dirty bit by hand in the kernel, since the hardware
++ * will do the accessed bit for us, and we don't want to
++ * race with other CPU's that might be updating the dirty
++ * bit at the same time.
++ */
++#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
++	do {								  \
++		if (__dirty) {						  \
++		        if ( likely((__vma)->vm_mm == current->mm) ) {    \
++			    BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
++			    pte_update_defer((__vma)->vm_mm, (__address), (__ptep)); \
++			} else {                                          \
++                            xen_l1_entry_update((__ptep), (__entry)); \
++			    pte_update_defer((__vma)->vm_mm, (__address), (__ptep)); \
++			    flush_tlb_page((__vma), (__address));         \
++			}                                                 \
++		}							  \
++	} while (0)
++
++/*
++ * We don't actually have these, but we want to advertise them so that
++ * we can encompass the flush here.
++ */
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++
++#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
++#define ptep_clear_flush_dirty(vma, address, ptep)			\
++({									\
++	int __dirty;							\
++	__dirty = pte_dirty(*(ptep));					\
++	if (__dirty) {							\
++		clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low);		\
++		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
++		flush_tlb_page(vma, address);				\
++	}								\
++	__dirty;							\
++})
++
++#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
++#define ptep_clear_flush_young(vma, address, ptep)			\
++({									\
++	int __young;							\
++	__young = pte_young(*(ptep));					\
++	if (__young) {							\
++		clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low);	\
++		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
++		flush_tlb_page(vma, address);				\
++	}								\
++	__young;							\
++})
++
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 +{
-+	if (!pte_young(*ptep))
-+		return 0;
-+	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
++	pte_t pte = raw_ptep_get_and_clear(ptep);
++	pte_update(mm, addr, ptep);
++	return pte;
 +}
 +
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 +static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 +{
 +	pte_t pte;
@@ -74153,10 +69721,12 @@
 +	return pte;
 +}
 +
++#define __HAVE_ARCH_PTEP_SET_WRPROTECT
 +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 +{
 +	if (pte_write(*ptep))
 +		clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
++	pte_update(mm, addr, ptep);
 +}
 +
 +/*
@@ -74246,11 +69816,11 @@
 +#define pte_index(address) \
 +		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 +#define pte_offset_kernel(dir, address) \
-+	((pte_t *) pmd_page_kernel(*(dir)) +  pte_index(address))
++	((pte_t *) pmd_page_vaddr(*(dir)) +  pte_index(address))
 +
 +#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 +
-+#define pmd_page_kernel(pmd) \
++#define pmd_page_vaddr(pmd) \
 +		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 +
 +/*
@@ -74273,8 +69843,6 @@
 + static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
 +#endif
 +
-+extern void noexec_setup(const char *str);
-+
 +#if defined(CONFIG_HIGHPTE)
 +#define pte_offset_map(dir, address) \
 +	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
@@ -74292,29 +69860,19 @@
 +#define pte_unmap_nested(pte) do { } while (0)
 +#endif
 +
++/* Clear a kernel PTE and flush it from the TLB */
++#define kpte_clear_flush(ptep, vaddr)					\
++do {									\
++	pte_clear(&init_mm, vaddr, ptep);				\
++	__flush_tlb_one(vaddr);						\
++} while (0)
++
 +/*
 + * The i386 doesn't have any external MMU info: the kernel page
 + * tables contain all the necessary information.
-+ *
-+ * Also, we only update the dirty/accessed state if we set
-+ * the dirty bit by hand in the kernel, since the hardware
-+ * will do the accessed bit for us, and we don't want to
-+ * race with other CPU's that might be updating the dirty
-+ * bit at the same time.
 + */
 +#define update_mmu_cache(vma,address,pte) do { } while (0)
 +#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-+	do {								  \
-+		if (__dirty) {						  \
-+		        if ( likely((__vma)->vm_mm == current->mm) ) {    \
-+			    BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
-+			} else {                                          \
-+                            xen_l1_entry_update((__ptep), (__entry)); \
-+			    flush_tlb_page((__vma), (__address));         \
-+			}                                                 \
-+		}							  \
-+	} while (0)
 +
 +#define __HAVE_ARCH_PTEP_ESTABLISH
 +#define ptep_establish(__vma, __address, __ptep, __entry)		\
@@ -74368,26 +69926,20 @@
 +                    unsigned long address,
 +                    unsigned long size);
 +
-+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
-+direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
++#define io_remap_pfn_range(vma,vaddr,pfn,size,prot) \
++	direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
 +
 +#define MK_IOSPACE_PFN(space, pfn)	(pfn)
 +#define GET_IOSPACE(pfn)		0
 +#define GET_PFN(pfn)			(pfn)
 +
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-+#define __HAVE_ARCH_PTE_SAME
 +#include <asm-generic/pgtable.h>
 +
 +#endif /* _I386_PGTABLE_H */
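
ptep_clear_flush_young()/ptep_clear_flush_dirty() above bundle the bit clear,
the pte_update_defer() shadow-pagetable notification, and the TLB flush under
a single page-table-lock hold, which is the invariant the long pte_update
comment demands. The consumer side is the generic page-aging path; roughly
(sketch of the caller idiom, names illustrative, not code from this patch):

    #include <linux/mm.h>

    /* Sketch: how reclaim-style code consumes the hook. */
    static int example_page_referenced_one(struct vm_area_struct *vma,
                                           unsigned long address, pte_t *ptep)
    {
            int referenced = 0;

            if (ptep_clear_flush_young(vma, address, ptep))
                    referenced++;   /* accessed bit was set; now cleared
                                     * and the stale TLB entry is gone   */
            return referenced;
    }
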
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/processor.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/processor.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/processor.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/processor.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,741 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/processor.h b/include/asm-i386/mach-xen/asm/processor.h
+--- a/include/asm-i386/mach-xen/asm/processor.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/processor.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,759 @@
 +/*
 + * include/asm-i386/processor.h
 + *
@@ -74410,6 +69962,7 @@
 +#include <linux/threads.h>
 +#include <asm/percpu.h>
 +#include <linux/cpumask.h>
++#include <linux/init.h>
 +#include <xen/interface/physdev.h>
 +
 +/* flag for disabling the tsc */
@@ -74463,6 +70016,7 @@
 +#endif
 +	unsigned char x86_max_cores;	/* cpuid returned max cores value */
 +	unsigned char apicid;
++	unsigned short x86_clflush_size;
 +#ifdef CONFIG_SMP
 +	unsigned char booted_cores;	/* number of cores as seen by OS */
 +	__u8 phys_proc_id; 		/* Physical processor id. */
@@ -74504,6 +70058,8 @@
 +extern	int cpu_llc_id[NR_CPUS];
 +extern char ignore_fpu_irq;
 +
++void __init cpu_detect(struct cpuinfo_x86 *c);
++
 +extern void identify_cpu(struct cpuinfo_x86 *);
 +extern void print_cpu_info(struct cpuinfo_x86 *);
 +extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
@@ -74536,75 +70092,16 @@
 +#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
 +#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
 +
-+/*
-+ * Generic CPUID function
-+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
-+ * resulting in stale register contents being returned.
-+ */
-+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
-+{
-+	__asm__(XEN_CPUID
-+		: "=a" (*eax),
-+		  "=b" (*ebx),
-+		  "=c" (*ecx),
-+		  "=d" (*edx)
-+		: "0" (op), "c"(0));
-+}
-+
-+/* Some CPUID calls want 'count' to be placed in ecx */
-+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-+	       	int *edx)
++static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
++					 unsigned int *ecx, unsigned int *edx)
 +{
++	/* ecx is often an input as well as an output. */
 +	__asm__(XEN_CPUID
 +		: "=a" (*eax),
 +		  "=b" (*ebx),
 +		  "=c" (*ecx),
 +		  "=d" (*edx)
-+		: "0" (op), "c" (count));
-+}
-+
-+/*
-+ * CPUID functions returning a single datum
-+ */
-+static inline unsigned int cpuid_eax(unsigned int op)
-+{
-+	unsigned int eax;
-+
-+	__asm__(XEN_CPUID
-+		: "=a" (eax)
-+		: "0" (op)
-+		: "bx", "cx", "dx");
-+	return eax;
-+}
-+static inline unsigned int cpuid_ebx(unsigned int op)
-+{
-+	unsigned int eax, ebx;
-+
-+	__asm__(XEN_CPUID
-+		: "=a" (eax), "=b" (ebx)
-+		: "0" (op)
-+		: "cx", "dx" );
-+	return ebx;
-+}
-+static inline unsigned int cpuid_ecx(unsigned int op)
-+{
-+	unsigned int eax, ecx;
-+
-+	__asm__(XEN_CPUID
-+		: "=a" (eax), "=c" (ecx)
-+		: "0" (op)
-+		: "bx", "dx" );
-+	return ecx;
-+}
-+static inline unsigned int cpuid_edx(unsigned int op)
-+{
-+	unsigned int eax, edx;
-+
-+	__asm__(XEN_CPUID
-+		: "=a" (eax), "=d" (edx)
-+		: "0" (op)
-+		: "bx", "cx");
-+	return edx;
++		: "0" (*eax), "2" (*ecx));
 +}
 +
 +#define load_cr3(pgdir) write_cr3(__pa(pgdir))
@@ -74705,6 +70202,8 @@
 +		: :"a" (eax), "c" (ecx));
 +}
 +
++extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
++
 +/* from system description table in BIOS.  Mostly for MCA use, but
 +others may find it useful. */
 +extern unsigned int machine_id;
@@ -74874,6 +70373,7 @@
 +	.vm86_info = NULL,						\
 +	.sysenter_cs = __KERNEL_CS,					\
 +	.io_bitmap_ptr = NULL,						\
++	.gs = __KERNEL_PDA,						\
 +}
 +
 +#ifndef CONFIG_X86_NO_TSS
@@ -74890,25 +70390,11 @@
 +	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,			\
 +	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
 +}
-+
-+static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-+{
-+	tss->esp0 = thread->esp0;
-+	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-+	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-+		tss->ss1 = thread->sysenter_cs;
-+		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-+	}
-+}
-+#define load_esp0(tss, thread) \
-+	__load_esp0(tss, thread)
-+#else
-+#define load_esp0(tss, thread) \
-+	HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)
 +#endif
 +
 +#define start_thread(regs, new_eip, new_esp) do {		\
-+	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
++	__asm__("movl %0,%%fs": :"r" (0));			\
++	regs->xgs = 0;						\
 +	set_fs(USER_DS);					\
 +	regs->xds = __USER_DS;					\
 +	regs->xes = __USER_DS;					\
@@ -74918,26 +70404,6 @@
 +	regs->esp = new_esp;					\
 +} while (0)
 +
-+/*
-+ * These special macros can be used to get or set a debugging register
-+ */
-+#define get_debugreg(var, register)				\
-+		(var) = HYPERVISOR_get_debugreg((register))
-+#define set_debugreg(value, register)			\
-+		HYPERVISOR_set_debugreg((register), (value))
-+
-+/*
-+ * Set IOPL bits in EFLAGS from given mask
-+ */
-+static inline void set_iopl_mask(unsigned mask)
-+{
-+	struct physdev_set_iopl set_iopl;
-+
-+	/* Force the change at ring 0. */
-+	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
-+	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
-+}
-+
 +/* Forward declaration, a strange C thing */
 +struct task_struct;
 +struct mm_struct;
@@ -75029,6 +70495,106 @@
 +
 +#define cpu_relax()	rep_nop()
 +
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#else
++#define paravirt_enabled() 0
++#define __cpuid native_cpuid
++
++#ifndef CONFIG_X86_NO_TSS
++static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
++{
++	tss->esp0 = thread->esp0;
++	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
++	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
++		tss->ss1 = thread->sysenter_cs;
++		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
++	}
++}
++#define load_esp0(tss, thread) \
++	__load_esp0(tss, thread)
++#else
++#define load_esp0(tss, thread) \
++	HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)
++#endif
++
++
++/*
++ * These special macros can be used to get or set a debugging register
++ */
++#define get_debugreg(var, register)				\
++		(var) = HYPERVISOR_get_debugreg((register))
++#define set_debugreg(value, register)			\
++		HYPERVISOR_set_debugreg((register), (value))
++
++#define set_iopl_mask native_set_iopl_mask
++#endif /* CONFIG_PARAVIRT */
++
++/*
++ * Set IOPL bits in EFLAGS from given mask
++ */
++static fastcall inline void native_set_iopl_mask(unsigned mask)
++{
++	struct physdev_set_iopl set_iopl;
++
++	/* Force the change at ring 0. */
++	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
++	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++}
++
++/*
++ * Generic CPUID function
++ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
++ * resulting in stale register contents being returned.
++ */
++static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
++{
++	*eax = op;
++	*ecx = 0;
++	__cpuid(eax, ebx, ecx, edx);
++}
++
++/* Some CPUID calls want 'count' to be placed in ecx */
++static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
++			       int *edx)
++{
++	*eax = op;
++	*ecx = count;
++	__cpuid(eax, ebx, ecx, edx);
++}
++
++/*
++ * CPUID functions returning a single datum
++ */
++static inline unsigned int cpuid_eax(unsigned int op)
++{
++	unsigned int eax, ebx, ecx, edx;
++
++	cpuid(op, &eax, &ebx, &ecx, &edx);
++	return eax;
++}
++static inline unsigned int cpuid_ebx(unsigned int op)
++{
++	unsigned int eax, ebx, ecx, edx;
++
++	cpuid(op, &eax, &ebx, &ecx, &edx);
++	return ebx;
++}
++static inline unsigned int cpuid_ecx(unsigned int op)
++{
++	unsigned int eax, ebx, ecx, edx;
++
++	cpuid(op, &eax, &ebx, &ecx, &edx);
++	return ecx;
++}
++static inline unsigned int cpuid_edx(unsigned int op)
++{
++	unsigned int eax, ebx, ecx, edx;
++
++	cpuid(op, &eax, &ebx, &ecx, &edx);
++	return edx;
++}
++
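/*
 * Example (illustrative, not part of this patch): with the paravirt split,
 * every wrapper below funnels through __cpuid(), so a vendor-id query reads
 * the same as before.  Needs <linux/string.h> for memcpy().
 */
static inline void example_read_vendor_id(char buf[13])
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(0, &eax, &ebx, &ecx, &edx);	/* leaf 0: max leaf + vendor id */
	memcpy(buf + 0, &ebx, 4);		/* e.g. "Genu" */
	memcpy(buf + 4, &edx, 4);		/*      "ineI" */
	memcpy(buf + 8, &ecx, 4);		/*      "ntel" */
	buf[12] = '\0';
}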
 +/* generic versions from gas */
 +#define GENERIC_NOP1	".byte 0x90\n"
 +#define GENERIC_NOP2    	".byte 0x89,0xf6\n"
@@ -75128,104 +70694,14 @@
 +extern void enable_sep_cpu(void);
 +extern int sysenter_setup(void);
 +
-+#endif /* __ASM_I386_PROCESSOR_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/ptrace.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/ptrace.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/ptrace.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/ptrace.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,90 @@
-+#ifndef _I386_PTRACE_H
-+#define _I386_PTRACE_H
-+
-+#define EBX 0
-+#define ECX 1
-+#define EDX 2
-+#define ESI 3
-+#define EDI 4
-+#define EBP 5
-+#define EAX 6
-+#define DS 7
-+#define ES 8
-+#define FS 9
-+#define GS 10
-+#define ORIG_EAX 11
-+#define EIP 12
-+#define CS  13
-+#define EFL 14
-+#define UESP 15
-+#define SS   16
-+#define FRAME_SIZE 17
-+
-+/* this struct defines the way the registers are stored on the 
-+   stack during a system call. */
-+
-+struct pt_regs {
-+	long ebx;
-+	long ecx;
-+	long edx;
-+	long esi;
-+	long edi;
-+	long ebp;
-+	long eax;
-+	int  xds;
-+	int  xes;
-+	long orig_eax;
-+	long eip;
-+	int  xcs;
-+	long eflags;
-+	long esp;
-+	int  xss;
-+};
-+
-+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-+#define PTRACE_GETREGS            12
-+#define PTRACE_SETREGS            13
-+#define PTRACE_GETFPREGS          14
-+#define PTRACE_SETFPREGS          15
-+#define PTRACE_GETFPXREGS         18
-+#define PTRACE_SETFPXREGS         19
-+
-+#define PTRACE_OLDSETOPTIONS         21
-+
-+#define PTRACE_GET_THREAD_AREA    25
-+#define PTRACE_SET_THREAD_AREA    26
-+
-+#define PTRACE_SYSEMU		  31
-+#define PTRACE_SYSEMU_SINGLESTEP  32
-+
-+#ifdef __KERNEL__
-+
-+#include <asm/vm86.h>
-+
-+struct task_struct;
-+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
-+
-+/*
-+ * user_mode_vm(regs) determines whether a register set came from user mode.
-+ * This is true if V8086 mode was enabled OR if the register set was from
-+ * protected mode with RPL-3 CS value.  This tricky test checks that with
-+ * one comparison.  Many places in the kernel can bypass this full check
-+ * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
-+ */
-+static inline int user_mode(struct pt_regs *regs)
-+{
-+	return (regs->xcs & 2) != 0;
-+}
-+static inline int user_mode_vm(struct pt_regs *regs)
-+{
-+	return ((regs->xcs & 2) | (regs->eflags & VM_MASK)) != 0;
-+}
-+#define instruction_pointer(regs) ((regs)->eip)
-+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
-+extern unsigned long profile_pc(struct pt_regs *regs);
-+#else
-+#define profile_pc(regs) instruction_pointer(regs)
-+#endif
-+#endif /* __KERNEL__ */
++extern int init_gdt(int cpu, struct task_struct *idle);
++extern void cpu_set_gdt(int);
++extern void secondary_cpu_init(void);
 +
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/scatterlist.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/scatterlist.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/scatterlist.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/scatterlist.h	2006-11-19 14:27:04.000000000 +0100
++#endif /* __ASM_I386_PROCESSOR_H */
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/scatterlist.h b/include/asm-i386/mach-xen/asm/scatterlist.h
+--- a/include/asm-i386/mach-xen/asm/scatterlist.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/scatterlist.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,22 @@
 +#ifndef _I386_SCATTERLIST_H
 +#define _I386_SCATTERLIST_H
@@ -75249,216 +70725,10 @@
 +#define ISA_DMA_THRESHOLD (0x00ffffff)
 +
 +#endif /* !(_I386_SCATTERLIST_H) */
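
The Xen scatterlist keeps the native i386 layout (plus a dma_length field for
the SWIOTLB case): drivers fill the page/offset/length triple and let
dma_map_sg() produce dma_address. Era-typical usage (sketch; the helper name
is illustrative and the field set assumes the 2.6.18-style struct above):

    #include <linux/mm.h>
    #include <asm/scatterlist.h>

    /* Sketch: populate one segment from a kernel page (2.6.18-era API). */
    static void example_fill_sg(struct scatterlist *sg, struct page *page,
                                unsigned int offset, unsigned int len)
    {
            sg->page   = page;      /* backing page                */
            sg->offset = offset;    /* byte offset within the page */
            sg->length = len;       /* segment length              */
            /* sg->dma_address (and dma_length under SWIOTLB) are
             * filled in by dma_map_sg() at mapping time.          */
    }
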
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/segment.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/segment.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/segment.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/segment.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,117 @@
-+#ifndef _ASM_SEGMENT_H
-+#define _ASM_SEGMENT_H
-+
-+/*
-+ * The layout of the per-CPU GDT under Linux:
-+ *
-+ *   0 - null
-+ *   1 - reserved
-+ *   2 - reserved
-+ *   3 - reserved
-+ *
-+ *   4 - unused			<==== new cacheline
-+ *   5 - unused
-+ *
-+ *  ------- start of TLS (Thread-Local Storage) segments:
-+ *
-+ *   6 - TLS segment #1			[ glibc's TLS segment ]
-+ *   7 - TLS segment #2			[ Wine's %fs Win32 segment ]
-+ *   8 - TLS segment #3
-+ *   9 - reserved
-+ *  10 - reserved
-+ *  11 - reserved
-+ *
-+ *  ------- start of kernel segments:
-+ *
-+ *  12 - kernel code segment		<==== new cacheline
-+ *  13 - kernel data segment
-+ *  14 - default user CS
-+ *  15 - default user DS
-+ *  16 - TSS
-+ *  17 - LDT
-+ *  18 - PNPBIOS support (16->32 gate)
-+ *  19 - PNPBIOS support
-+ *  20 - PNPBIOS support
-+ *  21 - PNPBIOS support
-+ *  22 - PNPBIOS support
-+ *  23 - APM BIOS support
-+ *  24 - APM BIOS support
-+ *  25 - APM BIOS support 
-+ *
-+ *  26 - ESPFIX small SS
-+ *  27 - unused
-+ *  28 - unused
-+ *  29 - unused
-+ *  30 - unused
-+ *  31 - TSS for double fault handler
-+ */
-+#define GDT_ENTRY_TLS_ENTRIES	3
-+#define GDT_ENTRY_TLS_MIN	6
-+#define GDT_ENTRY_TLS_MAX 	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
-+
-+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
-+
-+#define GDT_ENTRY_DEFAULT_USER_CS	14
-+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
-+
-+#define GDT_ENTRY_DEFAULT_USER_DS	15
-+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
-+
-+#define GDT_ENTRY_KERNEL_BASE	12
-+
-+#define GDT_ENTRY_KERNEL_CS		(GDT_ENTRY_KERNEL_BASE + 0)
-+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
-+#define GET_KERNEL_CS() (__KERNEL_CS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
-+
-+#define GDT_ENTRY_KERNEL_DS		(GDT_ENTRY_KERNEL_BASE + 1)
-+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
-+#define GET_KERNEL_DS() (__KERNEL_DS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
-+
-+#define GDT_ENTRY_TSS			(GDT_ENTRY_KERNEL_BASE + 4)
-+#define GDT_ENTRY_LDT			(GDT_ENTRY_KERNEL_BASE + 5)
-+
-+#define GDT_ENTRY_PNPBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 6)
-+#define GDT_ENTRY_APMBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 11)
-+
-+#define GDT_ENTRY_ESPFIX_SS		(GDT_ENTRY_KERNEL_BASE + 14)
-+#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
-+
-+#define GDT_ENTRY_DOUBLEFAULT_TSS	31
-+
-+/*
-+ * The GDT has 32 entries
-+ */
-+#define GDT_ENTRIES 32
-+
-+#define GDT_SIZE (GDT_ENTRIES * 8)
-+
-+/* Simple and small GDT entries for booting only */
-+
-+#define GDT_ENTRY_BOOT_CS		2
-+#define __BOOT_CS	(GDT_ENTRY_BOOT_CS * 8)
-+
-+#define GDT_ENTRY_BOOT_DS		(GDT_ENTRY_BOOT_CS + 1)
-+#define __BOOT_DS	(GDT_ENTRY_BOOT_DS * 8)
-+
-+/* The PnP BIOS entries in the GDT */
-+#define GDT_ENTRY_PNPBIOS_CS32		(GDT_ENTRY_PNPBIOS_BASE + 0)
-+#define GDT_ENTRY_PNPBIOS_CS16		(GDT_ENTRY_PNPBIOS_BASE + 1)
-+#define GDT_ENTRY_PNPBIOS_DS		(GDT_ENTRY_PNPBIOS_BASE + 2)
-+#define GDT_ENTRY_PNPBIOS_TS1		(GDT_ENTRY_PNPBIOS_BASE + 3)
-+#define GDT_ENTRY_PNPBIOS_TS2		(GDT_ENTRY_PNPBIOS_BASE + 4)
-+
-+/* The PnP BIOS selectors */
-+#define PNP_CS32   (GDT_ENTRY_PNPBIOS_CS32 * 8)	/* segment for calling fn */
-+#define PNP_CS16   (GDT_ENTRY_PNPBIOS_CS16 * 8)	/* code segment for BIOS */
-+#define PNP_DS     (GDT_ENTRY_PNPBIOS_DS * 8)	/* data segment for BIOS */
-+#define PNP_TS1    (GDT_ENTRY_PNPBIOS_TS1 * 8)	/* transfer data segment */
-+#define PNP_TS2    (GDT_ENTRY_PNPBIOS_TS2 * 8)	/* another data segment */
-+
-+/*
-+ * The interrupt descriptor table has room for 256 idt's,
-+ * the global descriptor table is dependent on the number
-+ * of tasks we can have..
-+ */
-+#define IDT_ENTRIES 256
-+
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/setup.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/setup.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/setup.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/setup.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,81 @@
-+/*
-+ *	Just a place holder. We don't want to have to test x86 before
-+ *	we include stuff
-+ */
-+
-+#ifndef _i386_SETUP_H
-+#define _i386_SETUP_H
-+
-+#ifdef __KERNEL__
-+#include <linux/pfn.h>
-+
-+/*
-+ * Reserved space for vmalloc and iomap - defined in asm/page.h
-+ */
-+#define MAXMEM_PFN	PFN_DOWN(MAXMEM)
-+#define MAX_NONPAE_PFN	(1 << 20)
-+#endif
-+
-+#define PARAM_SIZE 4096
-+#define COMMAND_LINE_SIZE 256
-+
-+#define OLD_CL_MAGIC_ADDR	0x90020
-+#define OLD_CL_MAGIC		0xA33F
-+#define OLD_CL_BASE_ADDR	0x90000
-+#define OLD_CL_OFFSET		0x90022
-+#define NEW_CL_POINTER		0x228	/* Relative to real mode data */
-+
-+#ifndef __ASSEMBLY__
-+/*
-+ * This is set up by the setup-routine at boot-time
-+ */
-+extern unsigned char boot_params[PARAM_SIZE];
-+
-+#define PARAM	(boot_params)
-+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
-+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
-+#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
-+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
-+#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
-+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
-+#define IST_INFO   (*(struct ist_info *) (PARAM+0x60))
-+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
-+#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
-+#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
-+#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
-+#define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0)))
-+#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
-+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
-+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
-+#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
-+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
-+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
-+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
-+#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
-+#define INITRD_START (__pa(xen_start_info->mod_start))
-+#define INITRD_SIZE (xen_start_info->mod_len)
-+#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
-+#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
-+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
-+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
-+#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
-+
-+/*
-+ * Do NOT EVER look at the BIOS memory size location.
-+ * It does not work on many machines.
-+ */
-+#define LOWMEMSIZE()	(0x9f000)
-+
-+struct e820entry;
-+
-+char * __init machine_specific_memory_setup(void);
-+
-+int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
-+int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
-+void __init add_memory_region(unsigned long long start,
-+			      unsigned long long size, int type);
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#endif /* _i386_SETUP_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/smp.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/smp.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/smp.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/smp.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,103 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/smp.h b/include/asm-i386/mach-xen/asm/smp.h
+--- a/include/asm-i386/mach-xen/asm/smp.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/smp.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,115 @@
 +#ifndef __ASM_SMP_H
 +#define __ASM_SMP_H
 +
@@ -75469,6 +70739,7 @@
 +#include <linux/kernel.h>
 +#include <linux/threads.h>
 +#include <linux/cpumask.h>
++#include <asm/pda.h>
 +#endif
 +
 +#ifdef CONFIG_X86_LOCAL_APIC
@@ -75517,7 +70788,7 @@
 + * from the initial startup. We map APIC_BASE very early in page_setup(),
 + * so this is correct in the x86 case.
 + */
-+#define raw_smp_processor_id() (current_thread_info()->cpu)
++#define raw_smp_processor_id() (read_pda(cpu_number))
 +
 +extern cpumask_t cpu_possible_map;
 +#define cpu_callin_map cpu_possible_map
@@ -75540,237 +70811,42 @@
 +	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
 +}
 +#endif
-+
-+static __inline int logical_smp_processor_id(void)
-+{
-+	/* we don't want to mark this access volatile - bad code generation */
-+	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
-+}
-+
 +#endif
 +
++extern int safe_smp_processor_id(void);
 +extern int __cpu_disable(void);
 +extern void __cpu_die(unsigned int cpu);
++extern unsigned int num_processors;
 +extern void prefill_possible_map(void);
++
 +#endif /* !__ASSEMBLY__ */
 +
 +#else /* CONFIG_SMP */
 +
++#define safe_smp_processor_id()		0
 +#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
 +
 +#define NO_PROC_ID		0xFF		/* No processor magic marker */
 +
 +#endif
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/spinlock.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/spinlock.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/spinlock.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/spinlock.h	2006-12-05 18:42:37.000000000 +0100
-@@ -0,0 +1,202 @@
-+#ifndef __ASM_SPINLOCK_H
-+#define __ASM_SPINLOCK_H
-+
-+#include <asm/atomic.h>
-+#include <asm/rwlock.h>
-+#include <asm/page.h>
-+#include <linux/compiler.h>
-+
-+/*
-+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
-+ *
-+ * Simple spin lock operations.  There are two variants, one clears IRQ's
-+ * on the local processor, one does not.
-+ *
-+ * We make no fairness assumptions. They have a cost.
-+ *
-+ * (the type definitions are in asm/spinlock_types.h)
-+ */
-+
-+#define __raw_spin_is_locked(x) \
-+		(*(volatile signed char *)(&(x)->slock) <= 0)
-+
-+#define __raw_spin_lock_string \
-+	"\n1:\t" \
-+	LOCK_PREFIX " ; decb %0\n\t" \
-+	"jns 3f\n" \
-+	"2:\t" \
-+	"rep;nop\n\t" \
-+	"cmpb $0,%0\n\t" \
-+	"jle 2b\n\t" \
-+	"jmp 1b\n" \
-+	"3:\n\t"
-+
-+/*
-+ * NOTE: there's an irqs-on section here, which normally would have to be
-+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
-+ * __raw_spin_lock_string_flags().
-+ */
-+#define __raw_spin_lock_string_flags \
-+	"\n1:\t" \
-+	LOCK_PREFIX " ; decb %0\n\t" \
-+	"jns 5f\n" \
-+	"2:\t" \
-+	"testl $0x200, %1\n\t" \
-+	"jz 4f\n\t" \
-+	"#sti\n" \
-+	"3:\t" \
-+	"rep;nop\n\t" \
-+	"cmpb $0, %0\n\t" \
-+	"jle 3b\n\t" \
-+	"#cli\n\t" \
-+	"jmp 1b\n" \
-+	"4:\t" \
-+	"rep;nop\n\t" \
-+	"cmpb $0, %0\n\t" \
-+	"jg 1b\n\t" \
-+	"jmp 4b\n" \
-+	"5:\n\t"
-+
-+static inline void __raw_spin_lock(raw_spinlock_t *lock)
-+{
-+	asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
-+}
-+
-+/*
-+ * It is easier for the lock validator if interrupts are not re-enabled
-+ * in the middle of a lock-acquire. This is a performance feature anyway
-+ * so we turn it off:
-+ */
-+#ifndef CONFIG_PROVE_LOCKING
-+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
-+{
-+	asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
-+}
-+#endif
-+
-+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
-+{
-+	char oldval;
-+	__asm__ __volatile__(
-+		"xchgb %b0,%1"
-+		:"=q" (oldval), "+m" (lock->slock)
-+		:"0" (0) : "memory");
-+	return oldval > 0;
-+}
-+
-+/*
-+ * __raw_spin_unlock based on writing $1 to the low byte.
-+ * This method works. Despite all the confusion.
-+ * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
-+ * (PPro errata 66, 92)
-+ */
-+
-+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-+
-+#define __raw_spin_unlock_string \
-+	"movb $1,%0" \
-+		:"+m" (lock->slock) : : "memory"
-+
-+
-+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
-+{
-+	__asm__ __volatile__(
-+		__raw_spin_unlock_string
-+	);
-+}
 +
-+#else
++#ifndef __ASSEMBLY__
 +
-+#define __raw_spin_unlock_string \
-+	"xchgb %b0, %1" \
-+		:"=q" (oldval), "+m" (lock->slock) \
-+		:"0" (oldval) : "memory"
++extern u8 apicid_2_node[];
 +
-+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
++#ifdef CONFIG_X86_LOCAL_APIC
++static __inline int logical_smp_processor_id(void)
 +{
-+	char oldval = 1;
-+
-+	__asm__ __volatile__(
-+		__raw_spin_unlock_string
-+	);
++	/* we don't want to mark this access volatile - bad code generation */
++	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
 +}
-+
++#endif
 +#endif
 +
-+#define __raw_spin_unlock_wait(lock) \
-+	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
-+
-+/*
-+ * Read-write spinlocks, allowing multiple readers
-+ * but only one writer.
-+ *
-+ * NOTE! it is quite common to have readers in interrupts
-+ * but no interrupt writers. For those circumstances we
-+ * can "mix" irq-safe locks - any writer needs to get a
-+ * irq-safe write-lock, but readers can get non-irqsafe
-+ * read-locks.
-+ *
-+ * On x86, we implement read-write locks as a 32-bit counter
-+ * with the high bit (sign) being the "contended" bit.
-+ *
-+ * The inline assembly is non-obvious. Think about it.
-+ *
-+ * Changed to use the same technique as rw semaphores.  See
-+ * semaphore.h for details.  -ben
-+ *
-+ * the helpers are in arch/i386/kernel/semaphore.c
-+ */
-+
-+/**
-+ * read_can_lock - would read_trylock() succeed?
-+ * @lock: the rwlock in question.
-+ */
-+#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
-+
-+/**
-+ * write_can_lock - would write_trylock() succeed?
-+ * @lock: the rwlock in question.
-+ */
-+#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
-+
-+static inline void __raw_read_lock(raw_rwlock_t *rw)
-+{
-+	__build_read_lock(rw, "__read_lock_failed");
-+}
-+
-+static inline void __raw_write_lock(raw_rwlock_t *rw)
-+{
-+	__build_write_lock(rw, "__write_lock_failed");
-+}
-+
-+static inline int __raw_read_trylock(raw_rwlock_t *lock)
-+{
-+	atomic_t *count = (atomic_t *)lock;
-+	atomic_dec(count);
-+	if (atomic_read(count) >= 0)
-+		return 1;
-+	atomic_inc(count);
-+	return 0;
-+}
-+
-+static inline int __raw_write_trylock(raw_rwlock_t *lock)
-+{
-+	atomic_t *count = (atomic_t *)lock;
-+	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
-+		return 1;
-+	atomic_add(RW_LOCK_BIAS, count);
-+	return 0;
-+}
-+
-+static inline void __raw_read_unlock(raw_rwlock_t *rw)
-+{
-+	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
-+}
-+
-+static inline void __raw_write_unlock(raw_rwlock_t *rw)
-+{
-+	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
-+				 : "+m" (rw->lock) : : "memory");
-+}
-+
-+#endif /* __ASM_SPINLOCK_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/swiotlb.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/swiotlb.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/swiotlb.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/swiotlb.h	2006-11-19 14:27:04.000000000 +0100
++#endif
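
The smp.h rework above tracks the 2.6.20 switch to PDA-based CPU ids: raw_smp_processor_id() now reads the per-CPU cpu_number slot through the %fs-relative accessors from the newly included <asm/pda.h>, instead of chasing current_thread_info(). A minimal sketch of the access pattern (example_cpu() is a hypothetical caller, not part of the patch):

	#include <asm/pda.h>		/* i386 per-CPU data area accessors */

	static inline int example_cpu(void)
	{
		/* a single %fs-relative load, no thread_info pointer chase */
		return read_pda(cpu_number);
	}
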
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/swiotlb.h b/include/asm-i386/mach-xen/asm/swiotlb.h
+--- a/include/asm-i386/mach-xen/asm/swiotlb.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/swiotlb.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,41 @@
 +#ifndef _ASM_SWIOTLB_H
 +#define _ASM_SWIOTLB_H 1
@@ -75813,9 +70889,9 @@
 +#endif
 +
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/synch_bitops.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/synch_bitops.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/synch_bitops.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/synch_bitops.h b/include/asm-i386/mach-xen/asm/synch_bitops.h
+--- a/include/asm-i386/mach-xen/asm/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/synch_bitops.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,141 @@
 +#ifndef __XEN_SYNCH_BITOPS_H__
 +#define __XEN_SYNCH_BITOPS_H__
@@ -75958,10 +71034,10 @@
 +#define synch_cmpxchg_subword synch_cmpxchg
 +
 +#endif /* __XEN_SYNCH_BITOPS_H__ */
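
The synch_bitops family exists because memory shared with the hypervisor (event-channel masks, grant-table flags) must be updated with locked instructions even on CONFIG_SMP=n kernels, where the ordinary bitops drop the lock prefix. A hedged usage sketch, assuming the synch_test_and_set_bit() helper this header provides (example_flags and example_claim are hypothetical):

	#include <asm/synch_bitops.h>

	static unsigned long example_flags;	/* stand-in for a word in a shared page */

	static int example_claim(int nr)
	{
		/* locked even on UP: the peer is the hypervisor, not another CPU */
		return !synch_test_and_set_bit(nr, &example_flags);
	}
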
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/system.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/system.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/system.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/system.h	2006-12-05 18:42:37.000000000 +0100
-@@ -0,0 +1,495 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/system.h b/include/asm-i386/mach-xen/asm/system.h
+--- a/include/asm-i386/mach-xen/asm/system.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/system.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,535 @@
 +#ifndef __ASM_SYSTEM_H
 +#define __ASM_SYSTEM_H
 +
@@ -76060,6 +71136,9 @@
 +#define savesegment(seg, value) \
 +	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 +
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#else
 +#define read_cr0() ({ \
 +	unsigned int __dummy; \
 +	__asm__ __volatile__( \
@@ -76107,20 +71186,21 @@
 +		: "=r" (__dummy): "0" (0));	      \
 +	__dummy;				      \
 +})
-+
 +#define write_cr4(x) \
 +	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
-+/*
-+ * Clear and set 'TS' bit respectively
-+ */
++
++#define wbinvd() \
++	__asm__ __volatile__ ("wbinvd": : :"memory")
++
++/* Clear the 'TS' bit */
 +#define clts() (HYPERVISOR_fpu_taskswitch(0))
++#endif /* CONFIG_PARAVIRT */
++
++/* Set the 'TS' bit */
 +#define stts() (HYPERVISOR_fpu_taskswitch(1))
 +
 +#endif	/* __KERNEL__ */
 +
-+#define wbinvd() \
-+	__asm__ __volatile__ ("wbinvd": : :"memory")
-+
 +static inline unsigned long get_limit(unsigned long segment)
 +{
 +	unsigned long __limit;
@@ -76238,6 +71318,9 @@
 +#define cmpxchg(ptr,o,n)\
 +	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
 +					(unsigned long)(n),sizeof(*(ptr))))
++#define sync_cmpxchg(ptr,o,n)\
++	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
++					(unsigned long)(n),sizeof(*(ptr))))
 +#endif
 +
 +static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -76267,6 +71350,39 @@
 +	return old;
 +}
 +
++/*
++ * Always use locked operations when touching memory shared with a
++ * hypervisor, since the system may be SMP even if the guest kernel
++ * isn't.
++ */
++static inline unsigned long __sync_cmpxchg(volatile void *ptr,
++					    unsigned long old,
++					    unsigned long new, int size)
++{
++	unsigned long prev;
++	switch (size) {
++	case 1:
++		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	case 2:
++		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
++				     : "=a"(prev)
++				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	case 4:
++		__asm__ __volatile__("lock; cmpxchgl %1,%2"
++				     : "=a"(prev)
++				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	}
++	return old;
++}
++
 +#ifndef CONFIG_X86_CMPXCHG
 +/*
 + * Building a kernel capable running on 80386. It may be necessary to
@@ -76457,10 +71573,10 @@
 +void default_idle(void);
 +
 +#endif
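
The new sync_cmpxchg()/__sync_cmpxchg() pair added to system.h mirrors cmpxchg() but hard-codes the lock prefix, for the reason given in its comment: a uniprocessor guest still races against the hypervisor and other domains. A minimal usage sketch (example_idx and example_advance are hypothetical; shared-ring indices are the typical real user):

	static unsigned int example_idx;	/* stand-in for an index in a shared ring */

	static int example_advance(unsigned int old)
	{
		/* always emits lock; cmpxchgl, unlike cmpxchg() on UP builds */
		return sync_cmpxchg(&example_idx, old, old + 1) == old;
	}
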
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/timer.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/timer.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/timer.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/timer.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,70 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/timer.h b/include/asm-i386/mach-xen/asm/timer.h
+--- a/include/asm-i386/mach-xen/asm/timer.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/timer.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,67 @@
 +#ifndef _ASMi386_TIMER_H
 +#define _ASMi386_TIMER_H
 +#include <linux/init.h>
@@ -76501,11 +71617,8 @@
 +extern struct timer_opts* __init select_timer(void);
 +extern void clock_fallback(void);
 +void setup_pit_timer(void);
-+
 +/* Modifiers for buggy PIT handling */
-+
 +extern int pit_latch_buggy;
-+
 +extern struct timer_opts *cur_timer;
 +extern int timer_ack;
 +
@@ -76531,26 +71644,31 @@
 +extern struct init_timer_opts timer_pmtmr_init;
 +#endif
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/tlbflush.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/tlbflush.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/tlbflush.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/tlbflush.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,101 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/asm/tlbflush.h b/include/asm-i386/mach-xen/asm/tlbflush.h
+--- a/include/asm-i386/mach-xen/asm/tlbflush.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/asm/tlbflush.h	2007-03-14 10:55:43.000000000 +0100
+@@ -0,0 +1,106 @@
 +#ifndef _I386_TLBFLUSH_H
 +#define _I386_TLBFLUSH_H
 +
 +#include <linux/mm.h>
 +#include <asm/processor.h>
 +
-+#define __flush_tlb() xen_tlb_flush()
-+#define __flush_tlb_global() xen_tlb_flush()
-+#define __flush_tlb_all() xen_tlb_flush()
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#else
++#define __flush_tlb() __native_flush_tlb()
++#define __flush_tlb_global() __native_flush_tlb_global()
++#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
++#endif
 +
-+extern unsigned long pgkern_mask;
++#define __native_flush_tlb() xen_tlb_flush()
++#define __native_flush_tlb_global() xen_tlb_flush()
++#define __native_flush_tlb_single(addr) xen_invlpg(addr)
++#define __flush_tlb_all() xen_tlb_flush()
 +
 +#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
 +
-+#define __flush_tlb_single(addr) xen_invlpg(addr)
-+
 +#define __flush_tlb_one(addr) __flush_tlb_single(addr)
 +
 +/*
@@ -76636,33 +71754,9 @@
 +}
 +
 +#endif /* _I386_TLBFLUSH_H */
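
The tlbflush.h change layers the 2.6.20 CONFIG_PARAVIRT indirection on top of the Xen hooks: __flush_tlb() and friends dispatch through paravirt.h when paravirt is enabled, while the __native_* fallbacks are bound here to xen_tlb_flush()/xen_invlpg() rather than cr3 reloads or invlpg. A sketch of what a caller sees (example_flush() is hypothetical):

	static inline void example_flush(void)
	{
		__flush_tlb();	/* CONFIG_PARAVIRT: paravirt_ops hook;
				 * otherwise __native_flush_tlb(), which this
				 * header defines as xen_tlb_flush() */
	}
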
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/asm/vga.h linux-2.6.18-xen/include/asm-i386/mach-xen/asm/vga.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/asm/vga.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/asm/vga.h	2006-11-19 14:27:04.000000000 +0100
-@@ -0,0 +1,20 @@
-+/*
-+ *	Access to VGA videoram
-+ *
-+ *	(c) 1998 Martin Mares <mj at ucw.cz>
-+ */
-+
-+#ifndef _LINUX_ASM_VGA_H_
-+#define _LINUX_ASM_VGA_H_
-+
-+/*
-+ *	On the PC, we can just recalculate addresses and then
-+ *	access the videoram directly without any black magic.
-+ */
-+
-+#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
-+
-+#define vga_readb(x) (*(x))
-+#define vga_writeb(x,y) (*(y) = (x))
-+
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/irq_vectors.h linux-2.6.18-xen/include/asm-i386/mach-xen/irq_vectors.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/irq_vectors.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/irq_vectors.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/irq_vectors.h b/include/asm-i386/mach-xen/irq_vectors.h
+--- a/include/asm-i386/mach-xen/irq_vectors.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/irq_vectors.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,125 @@
 +/*
 + * This file should contain #defines for all of the interrupt vector
@@ -76789,9 +71883,9 @@
 +#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
 +
 +#endif /* _ASM_IRQ_VECTORS_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/mach_traps.h linux-2.6.18-xen/include/asm-i386/mach-xen/mach_traps.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/mach_traps.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/mach_traps.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/mach_traps.h b/include/asm-i386/mach-xen/mach_traps.h
+--- a/include/asm-i386/mach-xen/mach_traps.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/mach_traps.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,33 @@
 +/*
 + *  include/asm-xen/asm-i386/mach-xen/mach_traps.h
@@ -76826,18 +71920,104 @@
 +static inline void reassert_nmi(void) {}
 +
 +#endif /* !_MACH_TRAPS_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/mach-xen/setup_arch.h linux-2.6.18-xen/include/asm-i386/mach-xen/setup_arch.h
---- linux-2.6.18.3/include/asm-i386/mach-xen/setup_arch.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-i386/mach-xen/setup_arch.h	2006-12-05 18:42:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mach-xen/setup_arch.h b/include/asm-i386/mach-xen/setup_arch.h
+--- a/include/asm-i386/mach-xen/setup_arch.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-i386/mach-xen/setup_arch.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,5 @@
 +/* Hook to call BIOS initialisation function */
 +
 +#define ARCH_SETUP machine_specific_arch_setup();
 +
 +void __init machine_specific_arch_setup(void);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/pgtable-2level-defs.h linux-2.6.18-xen/include/asm-i386/pgtable-2level-defs.h
---- linux-2.6.18.3/include/asm-i386/pgtable-2level-defs.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-i386/pgtable-2level-defs.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/mmu.h b/include/asm-i386/mmu.h
+--- a/include/asm-i386/mmu.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-i386/mmu.h	2007-03-14 10:55:43.000000000 +0100
+@@ -13,6 +13,19 @@
+ 	struct semaphore sem;
+ 	void *ldt;
+ 	void *vdso;
++#ifdef CONFIG_XEN
++	int has_foreign_mappings;
++#endif
+ } mm_context_t;
+ 
++#ifdef CONFIG_XEN
++/* mm/memory.c:exit_mmap hook */
++extern void _arch_exit_mmap(struct mm_struct *mm);
++#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
++
++/* kernel/fork.c:dup_mmap hook */
++extern void _arch_dup_mmap(struct mm_struct *mm);
++#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
++#endif /* CONFIG_XEN */
++
+ #endif
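
The two mm hooks give the arch a veto point at address-space teardown and duplication: _arch_exit_mmap() lets a Xen guest unpin its page tables and deal with the has_foreign_mappings flag recorded in mm_context_t, and _arch_dup_mmap() covers fork(). The core call sites reduce to roughly this shape (a sketch assuming arch_exit_mmap() defaults to a no-op on other architectures; not the literal mm/mmap.c source):

	#include <linux/sched.h>	/* struct mm_struct */

	static void example_exit_mmap(struct mm_struct *mm)
	{
		arch_exit_mmap(mm);	/* Xen: unpin pagetables, drop foreign maps */
		/* ... then unmap vmas and free the page tables ... */
	}
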
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/pci.h b/include/asm-i386/pci.h
+--- a/include/asm-i386/pci.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-i386/pci.h	2007-03-14 10:55:43.000000000 +0100
+@@ -42,6 +42,27 @@
+ 
+ struct pci_dev;
+ 
++#ifdef CONFIG_SWIOTLB
++
++
++/* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
++#define PCI_DMA_BUS_IS_PHYS	(0)
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
++	dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
++	__u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME)			\
++	((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
++	(((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME)			\
++	((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
++	(((PTR)->LEN_NAME) = (VAL))
++
++#else
++
+ /* The PCI address space does equal the physical memory
+  * address space.  The networking and block device layers use
+  * this boolean for bounce buffer decisions.
+@@ -56,6 +77,8 @@
+ #define pci_unmap_len(PTR, LEN_NAME)		(0)
+ #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
+ 
++#endif
++
+ /* This is always fine. */
+ #define pci_dac_dma_supported(pci_dev, mask)	(1)
+ 
+@@ -110,10 +133,22 @@
+ 
+ #endif /* __KERNEL__ */
+ 
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++#include <xen/pcifront.h>
++#endif /* CONFIG_XEN_PCIDEV_FRONTEND */
++
+ /* implement the pci_ DMA API in terms of the generic device dma_ one */
+ #include <asm-generic/pci-dma-compat.h>
+ 
+ /* generic pci stuff */
+ #include <asm-generic/pci.h>
+ 
++#ifdef CONFIG_XEN
++/* On Xen we have to scan all functions since Xen hides bridges from
++ * us.  If a bridge is at fn=0 and that slot has a multifunction
++ * device, we won't find the additional devices without scanning all
++ * functions. */
++#undef pcibios_scan_all_fns
++#define pcibios_scan_all_fns(a, b)	1
++#endif /* CONFIG_XEN */
+ #endif /* __i386_PCI_H */
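
The DECLARE_PCI_UNMAP_* branch matters because with SWIOTLB the unmap path needs the original dma_addr_t back to locate the bounce buffer, while the non-SWIOTLB build compiles the bookkeeping fields down to nothing. Typical driver usage, as a sketch (struct and function names are hypothetical):

	#include <linux/pci.h>

	struct example_desc {
		void *buf;
		DECLARE_PCI_UNMAP_ADDR(mapping)	/* real dma_addr_t only with SWIOTLB */
	};

	static void example_unmap(struct pci_dev *pdev, struct example_desc *d,
				  size_t len)
	{
		pci_unmap_single(pdev, pci_unmap_addr(d, mapping), len,
				 PCI_DMA_FROMDEVICE);
	}
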
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/pgtable-2level-defs.h b/include/asm-i386/pgtable-2level-defs.h
+--- a/include/asm-i386/pgtable-2level-defs.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-i386/pgtable-2level-defs.h	2007-03-14 10:55:43.000000000 +0100
 @@ -1,6 +1,8 @@
  #ifndef _I386_PGTABLE_2LEVEL_DEFS_H
  #define _I386_PGTABLE_2LEVEL_DEFS_H
@@ -76847,21 +72027,85 @@
  /*
   * traditional i386 two-level paging structure:
   */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-i386/pgtable-3level-defs.h linux-2.6.18-xen/include/asm-i386/pgtable-3level-defs.h
---- linux-2.6.18.3/include/asm-i386/pgtable-3level-defs.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-i386/pgtable-3level-defs.h	2006-11-19 14:27:04.000000000 +0100
-@@ -1,6 +1,8 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/pgtable-3level-defs.h b/include/asm-i386/pgtable-3level-defs.h
+--- a/include/asm-i386/pgtable-3level-defs.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-i386/pgtable-3level-defs.h	2007-03-14 10:55:43.000000000 +0100
+@@ -1,6 +1,12 @@
  #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
  #define _I386_PGTABLE_3LEVEL_DEFS_H
  
++#ifdef CONFIG_XEN
++#define HAVE_SHARED_KERNEL_PMD 0
++#else
 +#define HAVE_SHARED_KERNEL_PMD 1
++#endif
 +
  /*
   * PGDIR_SHIFT determines what a top-level page table entry can map
   */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/agp.h linux-2.6.18-xen/include/asm-ia64/agp.h
---- linux-2.6.18.3/include/asm-ia64/agp.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/agp.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/segment.h b/include/asm-i386/segment.h
+--- a/include/asm-i386/segment.h	2007-03-16 18:49:38.000000000 +0100
++++ b/include/asm-i386/segment.h	2007-03-14 10:55:43.000000000 +0100
+@@ -87,7 +87,11 @@
+ #define GDT_SIZE (GDT_ENTRIES * 8)
+ 
+ /* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */
++#ifdef CONFIG_XEN
++#define SEGMENT_IS_FLAT_CODE(x)  ((x) == __USER_CS || (x) == (__KERNEL_CS | get_kernel_rpl()))
++#else
+ #define SEGMENT_IS_FLAT_CODE(x)  (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8)
++#endif
+ /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+ #define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
+ 
+@@ -132,6 +136,10 @@
+ #define SEGMENT_GDT		0x0
+ 
+ #ifndef CONFIG_PARAVIRT
++#ifdef CONFIG_XEN
++#define get_kernel_rpl()   (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1)
++#else
+ #define get_kernel_rpl()  0
++#endif /* CONFIG_XEN */
+ #endif
+ #endif
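
get_kernel_rpl() captures the fact that a Xen guest kernel normally runs in ring 1, so a "kernel" selector it loads carries RPL 1 unless XENFEAT_supervisor_mode_kernel is granted; that is also why SEGMENT_IS_FLAT_CODE() above can no longer use the plain bitmask test. Building a usable kernel CS value then looks like this (EXAMPLE_KERNEL_CS is hypothetical):

	/* a selector is (index << 3) | TI | RPL */
	#define EXAMPLE_KERNEL_CS	(__KERNEL_CS | get_kernel_rpl())
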
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/setup.h b/include/asm-i386/setup.h
+--- a/include/asm-i386/setup.h	2007-03-16 18:49:38.000000000 +0100
++++ b/include/asm-i386/setup.h	2007-03-14 10:55:43.000000000 +0100
+@@ -53,9 +53,15 @@
+ #define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
+ #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
+ #define KERNEL_START (*(unsigned long *) (PARAM+0x214))
++#ifdef CONFIG_XEN
++#define INITRD_START (__pa(xen_start_info->mod_start))
++#define INITRD_SIZE (xen_start_info->mod_len)
++#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
++#else
+ #define INITRD_START (*(unsigned long *) (PARAM+0x218))
+ #define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
+ #define EDID_INFO   (*(struct edid_info *) (PARAM+0x140))
++#endif /* CONFIG_XEN */
+ #define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
+ #define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
+ #define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
+diff -x '.hg*' -x '.git*' -urN a/include/asm-i386/vga.h b/include/asm-i386/vga.h
+--- a/include/asm-i386/vga.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-i386/vga.h	2007-03-14 10:55:43.000000000 +0100
+@@ -12,7 +12,11 @@
+  *	access the videoram directly without any black magic.
+  */
+ 
++#ifdef CONFIG_XEN
++#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
++#else
+ #define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x)
++#endif
+ 
+ #define vga_readb(x) (*(x))
+ #define vga_writeb(x,y) (*(y) = (x))
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/agp.h b/include/asm-ia64/agp.h
+--- a/include/asm-ia64/agp.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-ia64/agp.h	2007-03-14 10:55:43.000000000 +0100
 @@ -19,13 +19,44 @@
  #define flush_agp_cache()		mb()
  
@@ -76907,9 +72151,9 @@
 +#endif /* CONFIG_XEN */
  
  #endif /* _ASM_IA64_AGP_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/dma-mapping.h linux-2.6.18-xen/include/asm-ia64/dma-mapping.h
---- linux-2.6.18.3/include/asm-ia64/dma-mapping.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/dma-mapping.h	2006-12-05 18:42:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
+--- a/include/asm-ia64/dma-mapping.h	2007-03-16 18:49:38.000000000 +0100
++++ b/include/asm-ia64/dma-mapping.h	2007-03-14 10:55:43.000000000 +0100
 @@ -6,20 +6,67 @@
   *	David Mosberger-Tang <davidm at hpl.hp.com>
   */
@@ -77000,9 +72244,9 @@
  
  static inline int
  dma_set_mask (struct device *dev, u64 mask)
-@@ -61,4 +110,29 @@
+@@ -62,4 +111,29 @@
  
- #define dma_is_consistent(dma_handle)	(1)	/* all we do is coherent memory... */
+ #define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
  
 +#ifdef CONFIG_XEN
 +/* arch/i386/kernel/swiotlb.o requires */
@@ -77030,15 +72274,15 @@
 +}
 +
  #endif /* _ASM_IA64_DMA_MAPPING_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/fixmap.h linux-2.6.18-xen/include/asm-ia64/fixmap.h
---- linux-2.6.18.3/include/asm-ia64/fixmap.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-ia64/fixmap.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/fixmap.h b/include/asm-ia64/fixmap.h
+--- a/include/asm-ia64/fixmap.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-ia64/fixmap.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,2 @@
 +#define clear_fixmap(x)	do {} while (0)
 +#define	set_fixmap(x,y)	do {} while (0)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/gcc_intrin.h linux-2.6.18-xen/include/asm-ia64/gcc_intrin.h
---- linux-2.6.18.3/include/asm-ia64/gcc_intrin.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/gcc_intrin.h	2006-11-19 14:27:04.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
+--- a/include/asm-ia64/gcc_intrin.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-ia64/gcc_intrin.h	2007-03-14 10:55:43.000000000 +0100
 @@ -26,7 +26,7 @@
  
  register unsigned long ia64_r13 asm ("r13") __attribute_used__;
@@ -77228,9 +72472,9 @@
 +#define __ia64_get_psr_i()	(__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
 +
  #endif /* _ASM_IA64_GCC_INTRIN_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/hw_irq.h linux-2.6.18-xen/include/asm-ia64/hw_irq.h
---- linux-2.6.18.3/include/asm-ia64/hw_irq.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/hw_irq.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
+--- a/include/asm-ia64/hw_irq.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-ia64/hw_irq.h	2007-03-14 10:55:43.000000000 +0100
 @@ -15,7 +15,11 @@
  #include <asm/ptrace.h>
  #include <asm/smp.h>
@@ -77256,9 +72500,9 @@
  	platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
  }
  
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/hypercall.h linux-2.6.18-xen/include/asm-ia64/hypercall.h
---- linux-2.6.18.3/include/asm-ia64/hypercall.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-ia64/hypercall.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/hypercall.h b/include/asm-ia64/hypercall.h
+--- a/include/asm-ia64/hypercall.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-ia64/hypercall.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,423 @@
 +/******************************************************************************
 + * hypercall.h
@@ -77683,9 +72927,9 @@
 +#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
 +
 +#endif /* __HYPERCALL_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/hypervisor.h linux-2.6.18-xen/include/asm-ia64/hypervisor.h
---- linux-2.6.18.3/include/asm-ia64/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-ia64/hypervisor.h	2006-12-05 18:42:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/hypervisor.h b/include/asm-ia64/hypervisor.h
+--- a/include/asm-ia64/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-ia64/hypervisor.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,219 @@
 +/******************************************************************************
 + * hypervisor.h
@@ -77906,9 +73150,9 @@
 +#endif
 +
 +#endif /* __HYPERVISOR_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/intel_intrin.h linux-2.6.18-xen/include/asm-ia64/intel_intrin.h
---- linux-2.6.18.3/include/asm-ia64/intel_intrin.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/intel_intrin.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h
+--- a/include/asm-ia64/intel_intrin.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-ia64/intel_intrin.h	2007-03-14 10:55:43.000000000 +0100
 @@ -16,8 +16,10 @@
  		 	 * intrinsic
  		 	 */
@@ -78032,9 +73276,9 @@
  #define __builtin_trap()	__break(0);
  
  #endif /* _ASM_IA64_INTEL_INTRIN_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/io.h linux-2.6.18-xen/include/asm-ia64/io.h
---- linux-2.6.18.3/include/asm-ia64/io.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/io.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/io.h b/include/asm-ia64/io.h
+--- a/include/asm-ia64/io.h	2007-03-16 18:49:38.000000000 +0100
++++ b/include/asm-ia64/io.h	2007-03-14 10:55:43.000000000 +0100
 @@ -66,9 +66,11 @@
  #define PIO_RESERVED		__IA64_UNCACHED_OFFSET
  #define HAVE_ARCH_PIO_SIZE
@@ -78089,9 +73333,9 @@
  
  # endif /* KERNEL */
  
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/iosapic.h linux-2.6.18-xen/include/asm-ia64/iosapic.h
---- linux-2.6.18.3/include/asm-ia64/iosapic.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/iosapic.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h
+--- a/include/asm-ia64/iosapic.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-ia64/iosapic.h	2007-03-14 10:55:43.000000000 +0100
 @@ -53,6 +53,7 @@
  
  #define NR_IOSAPICS			256
@@ -78108,9 +73352,9 @@
  
  static inline void iosapic_eoi(char __iomem *iosapic, u32 vector)
  {
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/irq.h linux-2.6.18-xen/include/asm-ia64/irq.h
---- linux-2.6.18.3/include/asm-ia64/irq.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/irq.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
+--- a/include/asm-ia64/irq.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-ia64/irq.h	2007-03-14 10:55:43.000000000 +0100
 @@ -11,8 +11,39 @@
   * 02/29/00     D.Mosberger	moved most things into hw_irq.h
   */
@@ -78151,9 +73395,25 @@
  
  static __inline__ int
  irq_canonicalize (int irq)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/machvec_dig.h linux-2.6.18-xen/include/asm-ia64/machvec_dig.h
---- linux-2.6.18.3/include/asm-ia64/machvec_dig.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/machvec_dig.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/libata-portmap.h b/include/asm-ia64/libata-portmap.h
+--- a/include/asm-ia64/libata-portmap.h	2007-03-16 18:49:38.000000000 +0100
++++ b/include/asm-ia64/libata-portmap.h	1970-01-01 01:00:00.000000000 +0100
+@@ -1,12 +0,0 @@
+-#ifndef __ASM_IA64_LIBATA_PORTMAP_H
+-#define __ASM_IA64_LIBATA_PORTMAP_H
+-
+-#define ATA_PRIMARY_CMD		0x1F0
+-#define ATA_PRIMARY_CTL		0x3F6
+-#define ATA_PRIMARY_IRQ(dev)	isa_irq_to_vector(14)
+-
+-#define ATA_SECONDARY_CMD	0x170
+-#define ATA_SECONDARY_CTL	0x376
+-#define ATA_SECONDARY_IRQ(dev)	isa_irq_to_vector(15)
+-
+-#endif
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/machvec_dig.h b/include/asm-ia64/machvec_dig.h
+--- a/include/asm-ia64/machvec_dig.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-ia64/machvec_dig.h	2007-03-14 10:55:43.000000000 +0100
 @@ -13,4 +13,19 @@
  #define platform_name		"dig"
  #define platform_setup		dig_setup
@@ -78174,9 +73434,9 @@
 +#endif
 +
  #endif /* _ASM_IA64_MACHVEC_DIG_h */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/maddr.h linux-2.6.18-xen/include/asm-ia64/maddr.h
---- linux-2.6.18.3/include/asm-ia64/maddr.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-ia64/maddr.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/maddr.h b/include/asm-ia64/maddr.h
+--- a/include/asm-ia64/maddr.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-ia64/maddr.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,92 @@
 +#ifndef _ASM_IA64_MADDR_H
 +#define _ASM_IA64_MADDR_H
@@ -78270,29 +73530,29 @@
 +typedef unsigned long maddr_t;	// to compile netback, netfront
 +
 +#endif /* _ASM_IA64_MADDR_H */
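
maddr.h gives ia64 the pfn/mfn vocabulary that the common Xen drivers (netback, netfront) expect; since ia64 guests are auto-translated, most of the helpers elided by the hunk above collapse to identity operations. The conceptual contract, as a heavily hedged sketch (example_virt_to_machine() is hypothetical):

	static inline maddr_t example_virt_to_machine(void *v)
	{
		unsigned long pa = __pa(v);

		/* on auto-translated ia64, pfn_to_mfn(pfn) is effectively pfn */
		return ((maddr_t)pfn_to_mfn(pa >> PAGE_SHIFT) << PAGE_SHIFT)
			| (pa & (PAGE_SIZE - 1));
	}
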
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/meminit.h linux-2.6.18-xen/include/asm-ia64/meminit.h
---- linux-2.6.18.3/include/asm-ia64/meminit.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/meminit.h	2006-11-19 14:27:05.000000000 +0100
-@@ -16,10 +16,15 @@
-  * 	- command line string
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
+--- a/include/asm-ia64/meminit.h	2007-03-16 18:49:38.000000000 +0100
++++ b/include/asm-ia64/meminit.h	2007-03-14 10:55:43.000000000 +0100
+@@ -17,10 +17,15 @@
   * 	- kernel code & data
+  * 	- crash dumping code reserved region
   * 	- Kernel memory map built from EFI memory map
 + *	- xen start info
   *
   * More could be added if necessary
   */
 +#ifndef CONFIG_XEN
- #define IA64_MAX_RSVD_REGIONS 6
+ #define IA64_MAX_RSVD_REGIONS 7
 +#else
-+#define IA64_MAX_RSVD_REGIONS 7
++#define IA64_MAX_RSVD_REGIONS 8
 +#endif
  
  struct rsvd_region {
  	unsigned long start;	/* virtual address of beginning of element */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/page.h linux-2.6.18-xen/include/asm-ia64/page.h
---- linux-2.6.18.3/include/asm-ia64/page.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/page.h	2006-11-19 14:27:05.000000000 +0100
-@@ -126,7 +126,9 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/page.h b/include/asm-ia64/page.h
+--- a/include/asm-ia64/page.h	2007-03-16 18:49:38.000000000 +0100
++++ b/include/asm-ia64/page.h	2007-03-14 10:55:43.000000000 +0100
+@@ -125,7 +125,9 @@
  # define pfn_valid(pfn)		(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
  #endif
  
@@ -78302,7 +73562,7 @@
  #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
  #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
  
-@@ -227,5 +229,53 @@
+@@ -226,5 +228,53 @@
  					 (((current->personality & READ_IMPLIES_EXEC) != 0)	\
  					  ? VM_EXEC : 0))
  
@@ -78357,10 +73617,10 @@
 +#endif /* __ASSEMBLY__ */
 +#endif /* __KERNEL__ */
  #endif /* _ASM_IA64_PAGE_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/pal.h linux-2.6.18-xen/include/asm-ia64/pal.h
---- linux-2.6.18.3/include/asm-ia64/pal.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/pal.h	2006-11-19 14:27:05.000000000 +0100
-@@ -82,6 +82,7 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
+--- a/include/asm-ia64/pal.h	2007-03-16 18:49:39.000000000 +0100
++++ b/include/asm-ia64/pal.h	2007-03-14 10:55:43.000000000 +0100
+@@ -92,6 +92,7 @@
  #ifndef __ASSEMBLY__
  
  #include <linux/types.h>
@@ -78368,9 +73628,9 @@
  #include <asm/fpu.h>
  
  /*
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/pgalloc.h linux-2.6.18-xen/include/asm-ia64/pgalloc.h
---- linux-2.6.18.3/include/asm-ia64/pgalloc.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/pgalloc.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
+--- a/include/asm-ia64/pgalloc.h	2007-03-16 18:49:39.000000000 +0100
++++ b/include/asm-ia64/pgalloc.h	2007-03-14 10:55:43.000000000 +0100
 @@ -125,7 +125,11 @@
  static inline void
  pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
@@ -78383,9 +73643,9 @@
  }
  
  static inline void
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/privop.h linux-2.6.18-xen/include/asm-ia64/privop.h
---- linux-2.6.18.3/include/asm-ia64/privop.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-ia64/privop.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/privop.h b/include/asm-ia64/privop.h
+--- a/include/asm-ia64/privop.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-ia64/privop.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,59 @@
 +#ifndef _ASM_IA64_PRIVOP_H
 +#define _ASM_IA64_PRIVOP_H
@@ -78446,9 +73706,9 @@
 +#endif /* !__ASSEMBLY */
 +
 +#endif /* _ASM_IA64_PRIVOP_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/processor.h linux-2.6.18-xen/include/asm-ia64/processor.h
---- linux-2.6.18.3/include/asm-ia64/processor.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/processor.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
+--- a/include/asm-ia64/processor.h	2007-03-16 18:49:39.000000000 +0100
++++ b/include/asm-ia64/processor.h	2007-03-14 10:55:43.000000000 +0100
 @@ -18,6 +18,7 @@
  #include <asm/kregs.h>
  #include <asm/ptrace.h>
@@ -78456,10 +73716,10 @@
 +#include <asm/privop.h>
  
  #define IA64_NUM_DBG_REGS	8
- /*
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/synch_bitops.h linux-2.6.18-xen/include/asm-ia64/synch_bitops.h
---- linux-2.6.18.3/include/asm-ia64/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-ia64/synch_bitops.h	2006-11-19 14:27:05.000000000 +0100
+ 
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/synch_bitops.h b/include/asm-ia64/synch_bitops.h
+--- a/include/asm-ia64/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-ia64/synch_bitops.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,61 @@
 +#ifndef __XEN_SYNCH_BITOPS_H__
 +#define __XEN_SYNCH_BITOPS_H__
@@ -78522,9 +73782,9 @@
 +#define synch_cmpxchg_subword synch_cmpxchg
 +
 +#endif /* __XEN_SYNCH_BITOPS_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/system.h linux-2.6.18-xen/include/asm-ia64/system.h
---- linux-2.6.18.3/include/asm-ia64/system.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-ia64/system.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/system.h b/include/asm-ia64/system.h
+--- a/include/asm-ia64/system.h	2007-03-12 21:58:14.000000000 +0100
++++ b/include/asm-ia64/system.h	2007-03-14 10:55:43.000000000 +0100
 @@ -123,7 +123,7 @@
  #define __local_irq_save(x)			\
  do {						\
@@ -78543,9 +73803,9 @@
  
  #define irqs_disabled()				\
  ({						\
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-ia64/xen/privop.h linux-2.6.18-xen/include/asm-ia64/xen/privop.h
---- linux-2.6.18.3/include/asm-ia64/xen/privop.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-ia64/xen/privop.h	2006-11-19 14:27:05.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-ia64/xen/privop.h b/include/asm-ia64/xen/privop.h
+--- a/include/asm-ia64/xen/privop.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-ia64/xen/privop.h	2007-03-14 10:55:43.000000000 +0100
 @@ -0,0 +1,304 @@
 +#ifndef _ASM_IA64_XEN_PRIVOP_H
 +#define _ASM_IA64_XEN_PRIVOP_H
@@ -78851,9 +74111,9 @@
 +#define	ia64_pal_call_static		xen_pal_call_static
 +
 +#endif /* _ASM_IA64_XEN_PRIVOP_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-um/page.h linux-2.6.18-xen/include/asm-um/page.h
---- linux-2.6.18.3/include/asm-um/page.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-um/page.h	2006-11-19 14:27:09.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-um/page.h b/include/asm-um/page.h
+--- a/include/asm-um/page.h	2007-03-12 21:58:15.000000000 +0100
++++ b/include/asm-um/page.h	2007-03-14 10:55:49.000000000 +0100
 @@ -114,7 +114,7 @@
  extern struct page *arch_validate(struct page *page, gfp_t mask, int order);
  #define HAVE_ARCH_VALIDATE
@@ -78863,12 +74123,12 @@
  #define HAVE_ARCH_FREE_PAGE
  
  #include <asm-generic/memory_model.h>
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/apic.h linux-2.6.18-xen/include/asm-x86_64/apic.h
---- linux-2.6.18.3/include/asm-x86_64/apic.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-x86_64/apic.h	2006-11-19 14:27:10.000000000 +0100
-@@ -98,11 +98,13 @@
- extern int disable_timer_pin_1;
- 
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
+--- a/include/asm-x86_64/apic.h	2007-03-16 18:49:42.000000000 +0100
++++ b/include/asm-x86_64/apic.h	2007-03-14 10:55:49.000000000 +0100
+@@ -95,11 +95,13 @@
+ #define K8_APIC_EXT_INT_MSG_EXT 0x7
+ #define K8_APIC_EXT_LVT_ENTRY_THRESHOLD    0
  
 +#ifndef CONFIG_XEN
  void smp_send_timer_broadcast_ipi(void);
@@ -78878,29 +74138,140 @@
  #define ARCH_APICTIMER_STOPS_ON_C3	1
 +#endif
  
- #endif /* CONFIG_X86_LOCAL_APIC */
+ extern unsigned boot_cpu_id;
+ 
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/bootsetup.h b/include/asm-x86_64/bootsetup.h
+--- a/include/asm-x86_64/bootsetup.h	2007-03-15 15:56:09.000000000 +0100
++++ b/include/asm-x86_64/bootsetup.h	2007-03-14 10:55:49.000000000 +0100
+@@ -24,9 +24,15 @@
+ #define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
+ #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
+ #define KERNEL_START (*(unsigned int *) (PARAM+0x214))
++#ifdef CONFIG_XEN
++#define INITRD_START (__pa(xen_start_info->mod_start))
++#define INITRD_SIZE (xen_start_info->mod_len)
++#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
++#else
+ #define INITRD_START (*(unsigned int *) (PARAM+0x218))
+ #define INITRD_SIZE (*(unsigned int *) (PARAM+0x21c))
+ #define EDID_INFO (*(struct edid_info *) (PARAM+0x140))
++#endif
+ #define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
+ #define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
+ #define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
+--- a/include/asm-x86_64/dma-mapping.h	2007-03-16 18:49:42.000000000 +0100
++++ b/include/asm-x86_64/dma-mapping.h	2007-03-14 10:55:49.000000000 +0100
+@@ -55,6 +55,7 @@
+ extern struct dma_mapping_ops* dma_ops;
+ extern int iommu_merge;
+ 
++#ifndef CONFIG_XEN
+ static inline int dma_mapping_error(dma_addr_t dma_addr)
+ {
+ 	if (dma_ops->mapping_error)
+@@ -195,6 +196,10 @@
+ }
+ 
+ extern struct device fallback_dev;
++#else
++#include <asm-i386/mach-xen/asm/dma-mapping.h>
++#endif /* CONFIG_XEN */
++
+ extern int panic_on_overflow;
+ 
+ #endif /* _X8664_DMA_MAPPING_H */
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/dmi.h b/include/asm-x86_64/dmi.h
+--- a/include/asm-x86_64/dmi.h	2007-03-12 21:58:16.000000000 +0100
++++ b/include/asm-x86_64/dmi.h	2007-03-14 10:55:49.000000000 +0100
+@@ -5,6 +5,10 @@
+ 
+ extern void *dmi_ioremap(unsigned long addr, unsigned long size);
+ extern void dmi_iounmap(void *addr, unsigned long size);
++#ifdef CONFIG_XEN
++extern void *bt_ioremap(unsigned long addr, unsigned long size);
++extern void bt_iounmap(void *addr, unsigned long size);
++#endif
+ 
+ #define DMI_MAX_DATA 2048
+ 
+@@ -21,7 +25,12 @@
+ 	return dmi_alloc_data + idx;
+ }
+ 
++#ifdef CONFIG_XEN
++#define dmi_ioremap bt_ioremap
++#define dmi_iounmap bt_iounmap
++#else
+ #define dmi_ioremap early_ioremap
+ #define dmi_iounmap early_iounmap
++#endif
+ 
+ #endif
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h
+--- a/include/asm-x86_64/dwarf2.h	2007-03-16 18:49:42.000000000 +0100
++++ b/include/asm-x86_64/dwarf2.h	2007-03-14 10:55:49.000000000 +0100
+@@ -13,7 +13,7 @@
+    away for older version. 
+  */
+ 
+-#ifdef CONFIG_AS_CFI
++#ifdef CONFIG_UNWIND_INFO
+ 
+ #define CFI_STARTPROC .cfi_startproc
+ #define CFI_ENDPROC .cfi_endproc
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
+--- a/include/asm-x86_64/e820.h	2007-03-16 18:49:42.000000000 +0100
++++ b/include/asm-x86_64/e820.h	2007-03-14 10:55:49.000000000 +0100
+@@ -41,13 +41,21 @@
+ extern void setup_memory_region(void);
+ extern void contig_e820_setup(void); 
+ extern unsigned long e820_end_of_ram(void);
++#ifdef CONFIG_XEN
++extern void e820_reserve_resources(struct e820entry *e820, int nr_map);
++#else
+ extern void e820_reserve_resources(void);
++#endif
+ extern void e820_mark_nosave_regions(void);
+ extern void e820_print_map(char *who);
+ extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
+ extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
+ 
++#ifdef CONFIG_XEN
++extern void e820_setup_gap(struct e820entry *e820, int nr_map);
++#else
+ extern void e820_setup_gap(void);
++#endif
+ extern void e820_register_active_regions(int nid,
+ 				unsigned long start_pfn, unsigned long end_pfn);
  
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/ipi.h linux-2.6.18-xen/include/asm-x86_64/ipi.h
---- linux-2.6.18.3/include/asm-x86_64/ipi.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/asm-x86_64/ipi.h	2006-12-05 18:42:37.000000000 +0100
-@@ -49,6 +49,7 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
+--- a/include/asm-x86_64/ipi.h	2007-03-12 21:58:16.000000000 +0100
++++ b/include/asm-x86_64/ipi.h	2007-03-14 10:55:49.000000000 +0100
+@@ -49,8 +49,12 @@
  	return SET_APIC_DEST_FIELD(mask);
  }
  
-+#ifndef CONFIG_XEN_UNPRIVILEGED_GUEST
++
  static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
  {
++#ifdef CONFIG_XEN_UNPRIVILEGED_GUEST
++	BUG();
++#else
  	/*
-@@ -113,5 +114,6 @@
- 	}
- 	local_irq_restore(flags);
+ 	 * Subtle. In the case of the 'never do double writes' workaround
+ 	 * we have to lock out interrupts to be safe.  As we don't care
+@@ -74,6 +78,7 @@
+ 	 * Send the IPI. The write to APIC_ICR fires this off.
+ 	 */
+ 	apic_write(APIC_ICR, cfg);
++#endif /* !CONFIG_XEN_UNPRIVILEGED_GUEST */
  }
-+#endif /* CONFIG_XEN_UNPRIVILEGED_GUEST */
  
- #endif /* __ASM_IPI_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/arch_hooks.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/arch_hooks.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/arch_hooks.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/arch_hooks.h	2006-11-19 14:27:11.000000000 +0100
+ 
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/arch_hooks.h b/include/asm-x86_64/mach-xen/asm/arch_hooks.h
+--- a/include/asm-x86_64/mach-xen/asm/arch_hooks.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/arch_hooks.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,27 @@
 +#ifndef _ASM_ARCH_HOOKS_H
 +#define _ASM_ARCH_HOOKS_H
@@ -78918,7 +74289,7 @@
 +extern void init_ISA_irqs(void);
 +extern void apic_intr_init(void);
 +extern void smp_intr_init(void);
-+extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
++extern irqreturn_t timer_interrupt(int irq, void *dev_id);
 +
 +/* these are the defined hooks */
 +extern void intr_init_hook(void);
@@ -78929,56 +74300,10 @@
 +extern void mca_nmi_hook(void);
 +
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/bootsetup.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/bootsetup.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/bootsetup.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/bootsetup.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,42 @@
-+
-+#ifndef _X86_64_BOOTSETUP_H
-+#define _X86_64_BOOTSETUP_H 1
-+
-+#define BOOT_PARAM_SIZE		4096
-+extern char x86_boot_params[BOOT_PARAM_SIZE];
-+
-+/*
-+ * This is set up by the setup-routine at boot-time
-+ */
-+#define PARAM	((unsigned char *)x86_boot_params)
-+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
-+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
-+#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
-+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
-+#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
-+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
-+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
-+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
-+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
-+#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
-+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
-+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
-+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
-+#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
-+
-+#define INITRD_START (__pa(xen_start_info->mod_start))
-+#define INITRD_SIZE (xen_start_info->mod_len)
-+#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
-+
-+#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
-+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
-+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
-+#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
-+#define COMMAND_LINE saved_command_line
-+
-+#define RAMDISK_IMAGE_START_MASK  	0x07FF
-+#define RAMDISK_PROMPT_FLAG		0x8000
-+#define RAMDISK_LOAD_FLAG		0x4000	
-+
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/desc.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/desc.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/desc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/desc.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,263 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/desc.h b/include/asm-x86_64/mach-xen/asm/desc.h
+--- a/include/asm-x86_64/mach-xen/asm/desc.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/desc.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,212 @@
 +/* Written 2000 by Andi Kleen */ 
 +#ifndef __ARCH_DESC_H
 +#define __ARCH_DESC_H
@@ -78990,66 +74315,15 @@
 +
 +#include <linux/string.h>
 +#include <linux/smp.h>
++#include <asm/desc_defs.h>
 +
 +#include <asm/segment.h>
 +#include <asm/mmu.h>
 +
-+// 8 byte segment descriptor
-+struct desc_struct { 
-+	u16 limit0;
-+	u16 base0;
-+	unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
-+	unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
-+} __attribute__((packed)); 
-+
-+struct n_desc_struct { 
-+	unsigned int a,b;
-+}; 	
-+
-+enum { 
-+	GATE_INTERRUPT = 0xE, 
-+	GATE_TRAP = 0xF, 	
-+	GATE_CALL = 0xC,
-+}; 	
-+
-+// 16byte gate
-+struct gate_struct {          
-+	u16 offset_low;
-+	u16 segment; 
-+	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
-+	u16 offset_middle;
-+	u32 offset_high;
-+	u32 zero1; 
-+} __attribute__((packed));
-+
-+#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) 
-+#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
-+#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
-+
-+enum { 
-+	DESC_TSS = 0x9,
-+	DESC_LDT = 0x2,
-+}; 
-+
-+// LDT or TSS descriptor in the GDT. 16 bytes.
-+struct ldttss_desc { 
-+	u16 limit0;
-+	u16 base0;
-+	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
-+	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
-+	u32 base3;
-+	u32 zero1; 
-+} __attribute__((packed)); 
-+
-+struct desc_ptr {
-+	unsigned short size;
-+	unsigned long address;
-+} __attribute__((packed)) ;
++extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 +
 +extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
 +
-+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-+
 +#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
 +#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
 +
@@ -79147,7 +74421,7 @@
 +	 * -1? seg base+limit should be pointing to the address of the
 +	 * last valid byte
 +	 */
-+	set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS], 
++	set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
 +		(unsigned long)addr, DESC_TSS,
 +		IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
 +} 
@@ -79242,322 +74516,10 @@
 +#endif /* !__ASSEMBLY__ */
 +
 +#endif
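
The structure definitions removed above (desc_struct, gate_struct, ldttss_desc, desc_ptr) are not lost: the new #include <asm/desc_defs.h> picks up the copies that mainline factored out into a shared header. The PTR_LOW/PTR_MIDDLE/PTR_HIGH macros from that header split a 64-bit handler address across the three offset fields of a 16-byte gate; a standalone sanity check of the split (illustrative, compiles on its own):

	#define PTR_LOW(x)    ((unsigned long)(x) & 0xFFFF)
	#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
	#define PTR_HIGH(x)   ((unsigned long)(x) >> 32)

	static unsigned long reassemble(unsigned long addr)
	{
		/* offset_low | offset_middle<<16 | offset_high<<32 recovers addr */
		return PTR_LOW(addr) | (PTR_MIDDLE(addr) << 16) | (PTR_HIGH(addr) << 32);
	}
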
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/dma-mapping.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/dma-mapping.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/dma-mapping.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/dma-mapping.h	2006-12-05 18:42:37.000000000 +0100
-@@ -0,0 +1,207 @@
-+#ifndef _X8664_DMA_MAPPING_H
-+#define _X8664_DMA_MAPPING_H 1
-+
-+/*
-+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
-+ * documentation.
-+ */
-+
-+
-+#include <asm/scatterlist.h>
-+#include <asm/io.h>
-+#include <asm/swiotlb.h>
-+
-+struct dma_mapping_ops {
-+	int             (*mapping_error)(dma_addr_t dma_addr);
-+	void*           (*alloc_coherent)(struct device *dev, size_t size,
-+                                dma_addr_t *dma_handle, gfp_t gfp);
-+	void            (*free_coherent)(struct device *dev, size_t size,
-+                                void *vaddr, dma_addr_t dma_handle);
-+	dma_addr_t      (*map_single)(struct device *hwdev, void *ptr,
-+                                size_t size, int direction);
-+	/* like map_single, but doesn't check the device mask */
-+	dma_addr_t      (*map_simple)(struct device *hwdev, char *ptr,
-+                                size_t size, int direction);
-+	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
-+		                size_t size, int direction);
-+	void            (*sync_single_for_cpu)(struct device *hwdev,
-+		                dma_addr_t dma_handle, size_t size,
-+				int direction);
-+	void            (*sync_single_for_device)(struct device *hwdev,
-+                                dma_addr_t dma_handle, size_t size,
-+				int direction);
-+	void            (*sync_single_range_for_cpu)(struct device *hwdev,
-+                                dma_addr_t dma_handle, unsigned long offset,
-+		                size_t size, int direction);
-+	void            (*sync_single_range_for_device)(struct device *hwdev,
-+				dma_addr_t dma_handle, unsigned long offset,
-+		                size_t size, int direction);
-+	void            (*sync_sg_for_cpu)(struct device *hwdev,
-+                                struct scatterlist *sg, int nelems,
-+				int direction);
-+	void            (*sync_sg_for_device)(struct device *hwdev,
-+				struct scatterlist *sg, int nelems,
-+				int direction);
-+	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-+		                int nents, int direction);
-+	void            (*unmap_sg)(struct device *hwdev,
-+				struct scatterlist *sg, int nents,
-+				int direction);
-+	int             (*dma_supported)(struct device *hwdev, u64 mask);
-+	int		is_phys;
-+};
-+
-+extern dma_addr_t bad_dma_address;
-+extern struct dma_mapping_ops* dma_ops;
-+extern int iommu_merge;
-+
-+#if 0
-+static inline int valid_dma_direction(int dma_direction)
-+{
-+	return ((dma_direction == DMA_BIDIRECTIONAL) ||
-+		(dma_direction == DMA_TO_DEVICE) ||
-+		(dma_direction == DMA_FROM_DEVICE));
-+}
-+
-+static inline int dma_mapping_error(dma_addr_t dma_addr)
-+{
-+	if (dma_ops->mapping_error)
-+		return dma_ops->mapping_error(dma_addr);
-+
-+	return (dma_addr == bad_dma_address);
-+}
-+
-+extern void *dma_alloc_coherent(struct device *dev, size_t size,
-+				dma_addr_t *dma_handle, gfp_t gfp);
-+extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-+			      dma_addr_t dma_handle);
-+
-+static inline dma_addr_t
-+dma_map_single(struct device *hwdev, void *ptr, size_t size,
-+	       int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	return dma_ops->map_single(hwdev, ptr, size, direction);
-+}
-+
-+static inline void
-+dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
-+		 int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	dma_ops->unmap_single(dev, addr, size, direction);
-+}
-+
-+#define dma_map_page(dev,page,offset,size,dir) \
-+	dma_map_single((dev), page_address(page)+(offset), (size), (dir))
-+
-+#define dma_unmap_page dma_unmap_single
-+
-+static inline void
-+dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-+			size_t size, int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	if (dma_ops->sync_single_for_cpu)
-+		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-+					     direction);
-+	flush_write_buffers();
-+}
-+
-+static inline void
-+dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-+			   size_t size, int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	if (dma_ops->sync_single_for_device)
-+		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-+						direction);
-+	flush_write_buffers();
-+}
-+
-+static inline void
-+dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-+			      unsigned long offset, size_t size, int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	if (dma_ops->sync_single_range_for_cpu) {
-+		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
-+	}
-+
-+	flush_write_buffers();
-+}
-+
-+static inline void
-+dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
-+				 unsigned long offset, size_t size, int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	if (dma_ops->sync_single_range_for_device)
-+		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-+						      offset, size, direction);
-+
-+	flush_write_buffers();
-+}
-+
-+static inline void
-+dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-+		    int nelems, int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	if (dma_ops->sync_sg_for_cpu)
-+		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
-+	flush_write_buffers();
-+}
-+
-+static inline void
-+dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-+		       int nelems, int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	if (dma_ops->sync_sg_for_device) {
-+		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
-+	}
-+
-+	flush_write_buffers();
-+}
-+
-+static inline int
-+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	return dma_ops->map_sg(hwdev, sg, nents, direction);
-+}
-+
-+static inline void
-+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+	     int direction)
-+{
-+	BUG_ON(!valid_dma_direction(direction));
-+	dma_ops->unmap_sg(hwdev, sg, nents, direction);
-+}
-+
-+extern int dma_supported(struct device *hwdev, u64 mask);
-+
-+/* same for gart, swiotlb, and nommu */
-+static inline int dma_get_cache_alignment(void)
-+{
-+	return boot_cpu_data.x86_clflush_size;
-+}
-+
-+#define dma_is_consistent(h) 1
-+
-+extern int dma_set_mask(struct device *dev, u64 mask);
-+
-+static inline void
-+dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
-+{
-+	flush_write_buffers();
-+}
-+
-+extern struct device fallback_dev;
-+#endif
-+extern int panic_on_overflow;
-+
-+#endif /* _X8664_DMA_MAPPING_H */
-+
-+#include <asm-i386/mach-xen/asm/dma-mapping.h>
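
With the x86_64 copy reduced to an #include of the i386 Xen version, the dma_mapping_ops table above remains the dispatch point behind the streaming-DMA wrappers. A sketch of how a driver of this era drives it (device, buffer, and error handling are illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int example_send(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (dma_mapping_error(handle))	/* one-argument form in 2.6.x */
			return -EIO;
		/* ... hand "handle" to the device and run the transfer ... */
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}
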
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/dmi.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/dmi.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/dmi.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/dmi.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,29 @@
-+#ifndef _ASM_DMI_H
-+#define _ASM_DMI_H 1
-+
-+#include <asm/io.h>
-+
-+extern void *dmi_ioremap(unsigned long addr, unsigned long size);
-+extern void dmi_iounmap(void *addr, unsigned long size);
-+extern void *bt_ioremap(unsigned long addr, unsigned long size);
-+extern void bt_iounmap(void *addr, unsigned long size);
-+
-+#define DMI_MAX_DATA 2048
-+
-+extern int dmi_alloc_index;
-+extern char dmi_alloc_data[DMI_MAX_DATA];
-+
-+/* This is so early that there is no good way to allocate dynamic memory. 
-+   Allocate data in a BSS array. */

-+static inline void *dmi_alloc(unsigned len)
-+{
-+	int idx = dmi_alloc_index;
-+	if ((dmi_alloc_index += len) > DMI_MAX_DATA)
-+		return NULL;
-+	return dmi_alloc_data + idx;
-+}
-+
-+#define dmi_ioremap bt_ioremap
-+#define dmi_iounmap bt_iounmap
-+
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/e820.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/e820.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/e820.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/e820.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,64 @@
-+/*
-+ * structures and definitions for the int 15, ax=e820 memory map
-+ * scheme.
-+ *
-+ * In a nutshell, setup.S populates a scratch table in the
-+ * empty_zero_block that contains a list of usable address/size
-+ * duples.  In setup.c, this information is transferred into the e820map,
-+ * and in init.c/numa.c, that new information is used to mark pages
-+ * reserved or not.
-+ */
-+#ifndef __E820_HEADER
-+#define __E820_HEADER
-+
-+#include <linux/mmzone.h>
-+
-+#define E820MAP	0x2d0		/* our map */
-+#define E820MAX	128		/* number of entries in E820MAP */
-+#define E820NR	0x1e8		/* # entries in E820MAP */
-+
-+#define E820_RAM	1
-+#define E820_RESERVED	2
-+#define E820_ACPI	3 /* usable as RAM once ACPI tables have been read */
-+#define E820_NVS	4
-+
-+#define HIGH_MEMORY	(1024*1024)
-+
-+#define LOWMEMSIZE()	(0x9f000)
-+
-+#ifndef __ASSEMBLY__
-+struct e820entry {
-+	u64 addr;	/* start of memory segment */
-+	u64 size;	/* size of memory segment */
-+	u32 type;	/* type of memory segment */
-+} __attribute__((packed));
-+
-+struct e820map {
-+    int nr_map;
-+	struct e820entry map[E820MAX];
-+};
-+
-+extern unsigned long find_e820_area(unsigned long start, unsigned long end, 
-+				    unsigned size);
-+extern void add_memory_region(unsigned long start, unsigned long size, 
-+			      int type);
-+extern void setup_memory_region(void);
-+extern void contig_e820_setup(void); 
-+extern unsigned long e820_end_of_ram(void);
-+extern void e820_reserve_resources(struct e820entry *e820, int nr_map);
-+extern void e820_print_map(char *who);
-+extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
-+extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
-+
-+extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
-+extern void e820_setup_gap(struct e820entry *e820, int nr_map);
-+extern unsigned long e820_hole_size(unsigned long start_pfn,
-+				    unsigned long end_pfn);
-+
-+extern void __init parse_memopt(char *p, char **end);
-+extern void __init parse_memmapopt(char *p, char **end);
-+
-+extern struct e820map e820;
-+#endif/*!__ASSEMBLY__*/
-+
-+#endif/*__E820_HEADER*/
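
e820.h is likewise dropped from the patch body; mainline 2.6.20 already carries an equivalent header, so the Xen patch no longer needs its own copy. For orientation, the e820 map declared above is walked as a plain array; a sketch (printk-only, illustrative):

	#include <linux/kernel.h>
	#include <asm/e820.h>

	static void example_dump_e820(void)
	{
		int i;

		for (i = 0; i < e820.nr_map; i++)
			printk(KERN_DEBUG "%016llx-%016llx type %u\n",
			       (unsigned long long)e820.map[i].addr,
			       (unsigned long long)(e820.map[i].addr + e820.map[i].size),
			       e820.map[i].type);	/* E820_RAM == 1 */
	}
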
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/fixmap.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/fixmap.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/fixmap.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/fixmap.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,113 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/fixmap.h b/include/asm-x86_64/mach-xen/asm/fixmap.h
+--- a/include/asm-x86_64/mach-xen/asm/fixmap.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/fixmap.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,109 @@
 +/*
 + * fixmap.h: compile-time virtual memory allocation
 + *
@@ -79599,13 +74561,9 @@
 +	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
 +	VSYSCALL_HPET,
 +	FIX_HPET_BASE,
-+#ifdef CONFIG_X86_LOCAL_APIC
 +	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
-+#endif
-+#ifdef CONFIG_X86_IO_APIC
 +	FIX_IO_APIC_BASE_0,
 +	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
-+#endif
 +#ifdef CONFIG_ACPI
 +	FIX_ACPI_BEGIN,
 +	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
@@ -79671,10 +74629,10 @@
 +}
 +
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/floppy.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/floppy.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/floppy.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/floppy.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,206 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/floppy.h b/include/asm-x86_64/mach-xen/asm/floppy.h
+--- a/include/asm-x86_64/mach-xen/asm/floppy.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/floppy.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,207 @@
 +/*
 + * Architecture specific parts of the Floppy driver
 + *
@@ -79691,6 +74649,7 @@
 +
 +#include <linux/vmalloc.h>
 +
++
 +/*
 + * The DMA channel used by the floppy controller cannot access data at
 + * addresses >= 16MB
@@ -79731,7 +74690,7 @@
 +static int virtual_dma_mode;
 +static int doing_pdma;
 +
-+static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
++static irqreturn_t floppy_hardint(int irq, void *dev_id)
 +{
 +	register unsigned char st;
 +
@@ -79743,7 +74702,7 @@
 +	static int dma_wait=0;
 +#endif
 +	if (!doing_pdma)
-+		return floppy_interrupt(irq, dev_id, regs);
++		return floppy_interrupt(irq, dev_id);
 +
 +#ifdef TRACE_FLPY_INT
 +	if(!calls)
@@ -79786,7 +74745,7 @@
 +		dma_wait=0;
 +#endif
 +		doing_pdma = 0;
-+		floppy_interrupt(irq, dev_id, regs);
++		floppy_interrupt(irq, dev_id);
 +		return IRQ_HANDLED;
 +	}
 +#ifdef TRACE_FLPY_INT
@@ -79881,9 +74840,9 @@
 +#define EXTRA_FLOPPY_PARAMS
 +
 +#endif /* __ASM_XEN_X86_64_FLOPPY_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/hw_irq.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/hw_irq.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/hw_irq.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/hw_irq.h	2006-12-05 18:42:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/hw_irq.h b/include/asm-x86_64/mach-xen/asm/hw_irq.h
+--- a/include/asm-x86_64/mach-xen/asm/hw_irq.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/hw_irq.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,136 @@
 +#ifndef _ASM_HW_IRQ_H
 +#define _ASM_HW_IRQ_H
@@ -79906,8 +74865,7 @@
 +#include <asm/irq.h>
 +#include <linux/profile.h>
 +#include <linux/smp.h>
-+
-+struct hw_interrupt_type;
++#include <linux/percpu.h>
 +#endif
 +
 +#define NMI_VECTOR		0x02
@@ -79964,9 +74922,10 @@
 +
 +
 +#ifndef __ASSEMBLY__
-+extern u8 irq_vector[NR_IRQ_VECTORS];
-+#define IO_APIC_VECTOR(irq)	(irq_vector[irq])
-+#define AUTO_ASSIGN		-1
++typedef int vector_irq_t[NR_VECTORS];
++DECLARE_PER_CPU(vector_irq_t, vector_irq);
++extern void __setup_vector_irq(int cpu);
++extern spinlock_t vector_lock;
 +
 +/*
 + * Various low-level irq details needed by irq.c, process.c,
@@ -80021,9 +74980,9 @@
 +#endif
 +
 +#endif /* _ASM_HW_IRQ_H */
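
The replacement of the global irq_vector[] table (and IO_APIC_VECTOR/AUTO_ASSIGN) with a per-CPU vector_irq map tracks the 2.6.19 x86-64 rework that lets each CPU assign interrupt vectors independently. The reverse lookup it enables looks roughly like this (illustrative accessor):

	#include <linux/percpu.h>

	/* typedef int vector_irq_t[NR_VECTORS];
	   DECLARE_PER_CPU(vector_irq_t, vector_irq);  -- as declared above */

	static int example_vector_to_irq(int cpu, int vector)
	{
		return per_cpu(vector_irq, cpu)[vector];	/* -1 == unassigned */
	}
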
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/hypercall.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/hypercall.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/hypercall.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/hypercall.h	2006-11-19 14:27:11.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/hypercall.h b/include/asm-x86_64/mach-xen/asm/hypercall.h
+--- a/include/asm-x86_64/mach-xen/asm/hypercall.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/hypercall.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,389 @@
 +/******************************************************************************
 + * hypercall.h
@@ -80414,16 +75373,16 @@
 +}
 +
 +#endif /* __HYPERCALL_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/hypervisor.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/hypervisor.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/hypervisor.h	2006-11-19 14:27:11.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/hypervisor.h b/include/asm-x86_64/mach-xen/asm/hypervisor.h
+--- a/include/asm-x86_64/mach-xen/asm/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/hypervisor.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,2 @@
 +
 +#include <asm-i386/mach-xen/asm/hypervisor.h>
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/io.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/io.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/io.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/io.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,327 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/io.h b/include/asm-x86_64/mach-xen/asm/io.h
+--- a/include/asm-x86_64/mach-xen/asm/io.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/io.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,300 @@
 +#ifndef _ASM_IO_H
 +#define _ASM_IO_H
 +
@@ -80697,33 +75656,6 @@
 +
 +#define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),(void *)(b),(c),(d))
 +
-+/**
-+ *	check_signature		-	find BIOS signatures
-+ *	@io_addr: mmio address to check 
-+ *	@signature:  signature block
-+ *	@length: length of signature
-+ *
-+ *	Perform a signature comparison with the mmio address io_addr. This
-+ *	address should have been obtained by ioremap.
-+ *	Returns 1 on a match.
-+ */
-+ 
-+static inline int check_signature(void __iomem *io_addr,
-+	const unsigned char *signature, int length)
-+{
-+	int retval = 0;
-+	do {
-+		if (readb(io_addr) != *signature)
-+			goto out;
-+		io_addr++;
-+		signature++;
-+		length--;
-+	} while (length);
-+	retval = 1;
-+out:
-+	return retval;
-+}
-+
 +/* Nothing to do */
 +
 +#define dma_cache_inv(_start,_size)		do { } while (0)
@@ -80751,9 +75683,9 @@
 +#define ARCH_HAS_DEV_MEM
 +
 +#endif
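
The open-coded check_signature() disappears from this header, presumably because mainline consolidated it into generic code around 2.6.19 (lib/check_signature.c, declared in <linux/io.h>), so keeping a Xen copy would now clash. Callers are unchanged; a usage sketch (address range and signature bytes are illustrative):

	#include <linux/io.h>

	static int example_find_sig(void)
	{
		static const unsigned char sig[2] = { 0x55, 0xaa };
		void __iomem *p = ioremap(0xc0000, 0x100);	/* e.g. option-ROM space */
		int found = 0;

		if (p) {
			found = check_signature(p, sig, sizeof(sig));
			iounmap(p);
		}
		return found;
	}
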
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/irqflags.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/irqflags.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/irqflags.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/irqflags.h	2006-12-05 18:42:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/irqflags.h b/include/asm-x86_64/mach-xen/asm/irqflags.h
+--- a/include/asm-x86_64/mach-xen/asm/irqflags.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/irqflags.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,65 @@
 +/*
 + * include/asm-x86_64/irqflags.h
@@ -80820,10 +75752,10 @@
 +#endif
 +
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/irq.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/irq.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/irq.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/irq.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,38 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/irq.h b/include/asm-x86_64/mach-xen/asm/irq.h
+--- a/include/asm-x86_64/mach-xen/asm/irq.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/irq.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,34 @@
 +#ifndef _ASM_IRQ_H
 +#define _ASM_IRQ_H
 +
@@ -80846,11 +75778,7 @@
 +	return ((irq == 2) ? 9 : irq);
 +}
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
 +#define ARCH_HAS_NMI_WATCHDOG		/* See include/linux/nmi.h */
-+#endif
-+
-+#define KDB_VECTOR	0xf9
 +
 +# define irq_ctx_init(cpu) do { } while (0)
 +
@@ -80862,9 +75790,9 @@
 +#define __ARCH_HAS_DO_SOFTIRQ 1
 +
 +#endif /* _ASM_IRQ_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/maddr.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/maddr.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/maddr.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/maddr.h	2006-11-19 14:27:11.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/maddr.h b/include/asm-x86_64/mach-xen/asm/maddr.h
+--- a/include/asm-x86_64/mach-xen/asm/maddr.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/maddr.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,139 @@
 +#ifndef _X86_64_MADDR_H
 +#define _X86_64_MADDR_H
@@ -81005,9 +75933,9 @@
 +
 +#endif /* _X86_64_MADDR_H */
 +
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/mmu_context.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/mmu_context.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/mmu_context.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/mmu_context.h	2006-11-19 14:27:11.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/mmu_context.h b/include/asm-x86_64/mach-xen/asm/mmu_context.h
+--- a/include/asm-x86_64/mach-xen/asm/mmu_context.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/mmu_context.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,135 @@
 +#ifndef __X86_64_MMU_CONTEXT_H
 +#define __X86_64_MMU_CONTEXT_H
@@ -81144,52 +76072,10 @@
 +}
 +
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/mmu.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/mmu.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/mmu.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/mmu.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,38 @@
-+#ifndef __x86_64_MMU_H
-+#define __x86_64_MMU_H
-+
-+#include <linux/spinlock.h>
-+#include <asm/semaphore.h>
-+
-+/*
-+ * The x86_64 doesn't have a mmu context, but
-+ * we put the segment information here.
-+ *
-+ * cpu_vm_mask is used to optimize ldt flushing.
-+ */
-+typedef struct { 
-+	void *ldt;
-+	rwlock_t ldtlock; 
-+	int size;
-+	struct semaphore sem; 
-+#ifdef CONFIG_XEN
-+	unsigned pinned:1;
-+	unsigned has_foreign_mappings:1;
-+	struct list_head unpinned;
-+#endif
-+} mm_context_t;
-+
-+#ifdef CONFIG_XEN
-+extern struct list_head mm_unpinned;
-+extern spinlock_t mm_unpinned_lock;
-+
-+/* mm/memory.c:exit_mmap hook */
-+extern void _arch_exit_mmap(struct mm_struct *mm);
-+#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
-+
-+/* kernel/fork.c:dup_mmap hook */
-+extern void _arch_dup_mmap(struct mm_struct *mm);
-+#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
-+#endif
-+
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/msr.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/msr.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/msr.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/msr.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,399 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/msr.h b/include/asm-x86_64/mach-xen/asm/msr.h
+--- a/include/asm-x86_64/mach-xen/asm/msr.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/msr.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,427 @@
 +#ifndef X86_64_MSR_H
 +#define X86_64_MSR_H 1
 +
@@ -81258,14 +76144,25 @@
 +#define rdtscl(low) \
 +     __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
 +
++#define rdtscp(low,high,aux) \
++     asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
++
 +#define rdtscll(val) do { \
 +     unsigned int __a,__d; \
 +     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
 +     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
 +} while(0)
 +
++#define rdtscpll(val, aux) do { \
++     unsigned long __a, __d; \
++     asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
++     (val) = (__d << 32) | __a; \
++} while (0)
++
 +#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
 +
++#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
++
 +#define rdpmc(counter,low,high) \
 +     __asm__ __volatile__("rdpmc" \
 +			  : "=a" (low), "=d" (high) \
@@ -81350,8 +76247,8 @@
 +#define MSR_LSTAR 0xc0000082 		/* long mode SYSCALL target */
 +#define MSR_CSTAR 0xc0000083		/* compatibility mode SYSCALL target */
 +#define MSR_SYSCALL_MASK 0xc0000084	/* EFLAGS mask for syscall */
-+#define MSR_FS_BASE 0xc0000100		/* 64bit GS base */
-+#define MSR_GS_BASE 0xc0000101		/* 64bit FS base */
++#define MSR_FS_BASE 0xc0000100		/* 64bit FS base */
++#define MSR_GS_BASE 0xc0000101		/* 64bit GS base */
 +#define MSR_KERNEL_GS_BASE  0xc0000102	/* SwapGS GS shadow (or USER_GS from kernel) */ 
 +/* EFER bits: */ 
 +#define _EFER_SCE 0  /* SYSCALL/SYSRET */
@@ -81370,6 +76267,7 @@
 +
 +#define MSR_IA32_PERFCTR0      0xc1
 +#define MSR_IA32_PERFCTR1      0xc2
++#define MSR_FSB_FREQ		0xcd
 +
 +#define MSR_MTRRcap		0x0fe
 +#define MSR_IA32_BBL_CR_CTL        0x119
@@ -81391,6 +76289,10 @@
 +#define MSR_IA32_LASTINTFROMIP     0x1dd
 +#define MSR_IA32_LASTINTTOIP       0x1de
 +
++#define MSR_IA32_PEBS_ENABLE		0x3f1
++#define MSR_IA32_DS_AREA		0x600
++#define MSR_IA32_PERF_CAPABILITIES	0x345
++
 +#define MSR_MTRRfix64K_00000	0x250
 +#define MSR_MTRRfix16K_80000	0x258
 +#define MSR_MTRRfix16K_A0000	0x259
@@ -81488,6 +76390,9 @@
 +#define MSR_IA32_PERF_STATUS		0x198
 +#define MSR_IA32_PERF_CTL		0x199
 +
++#define MSR_IA32_MPERF			0xE7
++#define MSR_IA32_APERF			0xE8
++
 +#define MSR_IA32_THERM_CONTROL		0x19a
 +#define MSR_IA32_THERM_INTERRUPT	0x19b
 +#define MSR_IA32_THERM_STATUS		0x19c
@@ -81558,138 +76463,50 @@
 +#define MSR_P4_FIRM_ESCR0 		0x3a4
 +#define MSR_P4_FIRM_ESCR1 		0x3a5
 +#define MSR_P4_FLAME_ESCR0 		0x3a6
-+#define MSR_P4_FLAME_ESCR1 		0x3a7
-+#define MSR_P4_FSB_ESCR0 		0x3a2
-+#define MSR_P4_FSB_ESCR1 		0x3a3
-+#define MSR_P4_IQ_ESCR0 		0x3ba
-+#define MSR_P4_IQ_ESCR1 		0x3bb
-+#define MSR_P4_IS_ESCR0 		0x3b4
-+#define MSR_P4_IS_ESCR1 		0x3b5
-+#define MSR_P4_ITLB_ESCR0 		0x3b6
-+#define MSR_P4_ITLB_ESCR1 		0x3b7
-+#define MSR_P4_IX_ESCR0 		0x3c8
-+#define MSR_P4_IX_ESCR1 		0x3c9
-+#define MSR_P4_MOB_ESCR0 		0x3aa
-+#define MSR_P4_MOB_ESCR1 		0x3ab
-+#define MSR_P4_MS_ESCR0 		0x3c0
-+#define MSR_P4_MS_ESCR1 		0x3c1
-+#define MSR_P4_PMH_ESCR0 		0x3ac
-+#define MSR_P4_PMH_ESCR1 		0x3ad
-+#define MSR_P4_RAT_ESCR0 		0x3bc
-+#define MSR_P4_RAT_ESCR1 		0x3bd
-+#define MSR_P4_SAAT_ESCR0 		0x3ae
-+#define MSR_P4_SAAT_ESCR1 		0x3af
-+#define MSR_P4_SSU_ESCR0 		0x3be
-+#define MSR_P4_SSU_ESCR1 		0x3bf    /* guess: not defined in manual */
-+#define MSR_P4_TBPU_ESCR0 		0x3c2
-+#define MSR_P4_TBPU_ESCR1 		0x3c3
-+#define MSR_P4_TC_ESCR0 		0x3c4
-+#define MSR_P4_TC_ESCR1 		0x3c5
-+#define MSR_P4_U2L_ESCR0 		0x3b0
-+#define MSR_P4_U2L_ESCR1 		0x3b1
-+
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/nmi.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/nmi.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/nmi.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/nmi.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,93 @@
-+/*
-+ *  linux/include/asm-i386/nmi.h
-+ */
-+#ifndef ASM_NMI_H
-+#define ASM_NMI_H
-+
-+#include <linux/pm.h>
-+#include <asm/io.h>
-+
-+#include <xen/interface/nmi.h>
-+
-+struct pt_regs;
-+
-+typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
-+
-+/**
-+ * set_nmi_callback
-+ *
-+ * Set a handler for an NMI. Only one handler may be
-+ * set. Return 1 if the NMI was handled.
-+ */
-+void set_nmi_callback(nmi_callback_t callback);
-+
-+/**
-+ * unset_nmi_callback
-+ *
-+ * Remove the handler previously set.
-+ */
-+void unset_nmi_callback(void);
-+
-+#ifdef CONFIG_PM
-+ 
-+/** Replace the PM callback routine for NMI. */
-+struct pm_dev * set_nmi_pm_callback(pm_callback callback);
-+
-+/** Unset the PM callback routine back to the default. */
-+void unset_nmi_pm_callback(struct pm_dev * dev);
-+
-+#else
-+
-+static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
-+{
-+	return 0;
-+} 
-+ 
-+static inline void unset_nmi_pm_callback(struct pm_dev * dev)
-+{
-+}
-+
-+#endif /* CONFIG_PM */
-+ 
-+extern void default_do_nmi(struct pt_regs *);
-+extern void die_nmi(char *str, struct pt_regs *regs);
-+
-+static inline unsigned char get_nmi_reason(void)
-+{
-+        shared_info_t *s = HYPERVISOR_shared_info;
-+        unsigned char reason = 0;
-+
-+        /* construct a value which looks like it came from
-+         * port 0x61.
-+         */
-+        if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
-+                reason |= 0x40;
-+        if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
-+                reason |= 0x80;
-+
-+        return reason;
-+}
++#define MSR_P4_FLAME_ESCR1 		0x3a7
++#define MSR_P4_FSB_ESCR0 		0x3a2
++#define MSR_P4_FSB_ESCR1 		0x3a3
++#define MSR_P4_IQ_ESCR0 		0x3ba
++#define MSR_P4_IQ_ESCR1 		0x3bb
++#define MSR_P4_IS_ESCR0 		0x3b4
++#define MSR_P4_IS_ESCR1 		0x3b5
++#define MSR_P4_ITLB_ESCR0 		0x3b6
++#define MSR_P4_ITLB_ESCR1 		0x3b7
++#define MSR_P4_IX_ESCR0 		0x3c8
++#define MSR_P4_IX_ESCR1 		0x3c9
++#define MSR_P4_MOB_ESCR0 		0x3aa
++#define MSR_P4_MOB_ESCR1 		0x3ab
++#define MSR_P4_MS_ESCR0 		0x3c0
++#define MSR_P4_MS_ESCR1 		0x3c1
++#define MSR_P4_PMH_ESCR0 		0x3ac
++#define MSR_P4_PMH_ESCR1 		0x3ad
++#define MSR_P4_RAT_ESCR0 		0x3bc
++#define MSR_P4_RAT_ESCR1 		0x3bd
++#define MSR_P4_SAAT_ESCR0 		0x3ae
++#define MSR_P4_SAAT_ESCR1 		0x3af
++#define MSR_P4_SSU_ESCR0 		0x3be
++#define MSR_P4_SSU_ESCR1 		0x3bf    /* guess: not defined in manual */
++#define MSR_P4_TBPU_ESCR0 		0x3c2
++#define MSR_P4_TBPU_ESCR1 		0x3c3
++#define MSR_P4_TC_ESCR0 		0x3c4
++#define MSR_P4_TC_ESCR1 		0x3c5
++#define MSR_P4_U2L_ESCR0 		0x3b0
++#define MSR_P4_U2L_ESCR1 		0x3b1
 +
-+extern int panic_on_timeout;
-+extern int unknown_nmi_panic;
++/* Intel Core-based CPU performance counters */
++#define MSR_CORE_PERF_FIXED_CTR0	0x309
++#define MSR_CORE_PERF_FIXED_CTR1	0x30a
++#define MSR_CORE_PERF_FIXED_CTR2	0x30b
++#define MSR_CORE_PERF_FIXED_CTR_CTRL	0x38d
++#define MSR_CORE_PERF_GLOBAL_STATUS	0x38e
++#define MSR_CORE_PERF_GLOBAL_CTRL	0x38f
++#define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x390
 +
-+extern int check_nmi_watchdog(void);
-+ 
-+extern void setup_apic_nmi_watchdog (void);
-+extern int reserve_lapic_nmi(void);
-+extern void release_lapic_nmi(void);
-+extern void disable_timer_nmi_watchdog(void);
-+extern void enable_timer_nmi_watchdog(void);
-+extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
-+
-+extern void nmi_watchdog_default(void);
-+extern int setup_nmi_watchdog(char *);
-+
-+extern unsigned int nmi_watchdog;
-+#define NMI_DEFAULT	-1
-+#define NMI_NONE	0
-+#define NMI_IO_APIC	1
-+#define NMI_LOCAL_APIC	2
-+#define NMI_INVALID	3
-+
-+#endif /* ASM_NMI_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/page.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/page.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/page.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/page.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,208 @@
++#endif
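
The new rdtscp/rdtscpll macros encode RDTSCP as raw opcode bytes (0f 01 f9) because assemblers of the day do not know the mnemonic. On CPUs that support it, the instruction returns the TSC in edx:eax and the TSC_AUX MSR (0xc0000103, loaded via write_rdtscp_aux(), conventionally with the CPU number) in ecx, so both values are read with no migration window between them. Sketch:

	static unsigned long example_read_tsc_and_cpu(unsigned int *cpu)
	{
		unsigned long tsc;
		unsigned int aux;

		rdtscpll(tsc, aux);	/* single instruction: tsc and aux are coherent */
		*cpu = aux;		/* whatever write_rdtscp_aux() stored, e.g. CPU id */
		return tsc;
	}
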
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/page.h b/include/asm-x86_64/mach-xen/asm/page.h
+--- a/include/asm-x86_64/mach-xen/asm/page.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/page.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,213 @@
 +#ifndef _X86_64_PAGE_H
 +#define _X86_64_PAGE_H
 +
@@ -81769,7 +76586,6 @@
 +
 +#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
 +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-+
 +/*
 + * These are used to make use of C type-checking..
 + */
@@ -81837,7 +76653,7 @@
 +#define __PHYSICAL_START	((unsigned long)CONFIG_PHYSICAL_START)
 +#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
 +#define __START_KERNEL_map	0xffffffff80000000UL
-+#define __PAGE_OFFSET           0xffff880000000000UL	
++#define __PAGE_OFFSET           0xffff880000000000UL
 +
 +#else
 +#define __PHYSICAL_START	CONFIG_PHYSICAL_START
@@ -81863,6 +76679,12 @@
 +#define KERNEL_TEXT_SIZE  (40UL*1024*1024)
 +#define KERNEL_TEXT_START 0xffffffff80000000UL 
 +
++#ifndef __ASSEMBLY__
++
++#include <asm/bug.h>
++
++#endif /* __ASSEMBLY__ */
++
 +#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
 +
 +/* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol.
@@ -81898,213 +76720,10 @@
 +#endif /* __KERNEL__ */
 +
 +#endif /* _X86_64_PAGE_H */
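
__PAGE_OFFSET stays at 0xffff880000000000, so for direct-mapped addresses the __pa()/__va() conversions remain plain offset arithmetic (under Xen these are pseudo-physical addresses; machine addresses need the additional pfn_to_mfn() step from maddr.h). A worked example of the arithmetic, keeping in mind the note above that kernel-text symbols must use __pa_symbol() instead:

	static void example_direct_map_math(void)
	{
		void *v = (void *)0xffff880001234000UL;			/* a direct-map address */
		unsigned long pa = (unsigned long)v - PAGE_OFFSET;	/* == 0x1234000 */
		void *back = (void *)(pa + PAGE_OFFSET);		/* == v again */

		(void)back;
	}
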
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/param.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/param.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/param.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/param.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,22 @@
-+#ifndef _ASMx86_64_PARAM_H
-+#define _ASMx86_64_PARAM_H
-+
-+#ifdef __KERNEL__
-+# define HZ            CONFIG_HZ	/* Internal kernel timer frequency */
-+# define USER_HZ       100		/* .. some user interfaces are in "ticks" */
-+# define CLOCKS_PER_SEC		(USER_HZ)	/* like times() */
-+#endif
-+
-+#ifndef HZ
-+#define HZ 100
-+#endif
-+
-+#define EXEC_PAGESIZE	4096
-+
-+#ifndef NOGROUP
-+#define NOGROUP		(-1)
-+#endif
-+
-+#define MAXHOSTNAMELEN	64	/* max length of hostname */
-+
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/pci.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/pci.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/pci.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/pci.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,173 @@
-+#ifndef __x8664_PCI_H
-+#define __x8664_PCI_H
-+
-+#include <asm/io.h>
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/mm.h> /* for struct page */
-+
-+/* Can be used to override the logic in pci_scan_bus for skipping
-+   already-configured bus numbers - to be used for buggy BIOSes
-+   or architectures with incomplete PCI setup by the loader */
-+
-+#ifdef CONFIG_PCI
-+extern unsigned int pcibios_assign_all_busses(void);
-+#else
-+#define pcibios_assign_all_busses()	0
-+#endif
-+#define pcibios_scan_all_fns(a, b)	0
-+
-+extern unsigned long pci_mem_start;
-+#define PCIBIOS_MIN_IO		0x1000
-+#define PCIBIOS_MIN_MEM		(pci_mem_start)
-+
-+#define PCIBIOS_MIN_CARDBUS_IO	0x4000
-+
-+void pcibios_config_init(void);
-+struct pci_bus * pcibios_scan_root(int bus);
-+extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
-+extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
-+
-+void pcibios_set_master(struct pci_dev *dev);
-+void pcibios_penalize_isa_irq(int irq, int active);
-+struct irq_routing_table *pcibios_get_irq_routing_table(void);
-+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-+
-+#include <linux/types.h>
-+#include <linux/slab.h>
-+#include <asm/scatterlist.h>
-+#include <linux/string.h>
-+#include <asm/page.h>
-+
-+extern void pci_iommu_alloc(void);
-+extern int iommu_setup(char *opt);
-+
-+/* The PCI address space does equal the physical memory
-+ * address space.  The networking and block device layers use
-+ * this boolean for bounce buffer decisions
-+ *
-+ * On AMD64 it mostly equals, but we set it to zero if a hardware
-+ * IOMMU (gart) or software IOMMU (swiotlb) is available.
-+ */
-+#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
-+
-+#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
-+
-+/*
-+ * x86-64 always supports DAC, but sometimes it is useful to force
-+ * devices through the IOMMU to get automatic sg list merging.
-+ * Optional right now.
-+ */
-+extern int iommu_sac_force;
-+#define pci_dac_dma_supported(pci_dev, mask)	(!iommu_sac_force)
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
-+	dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
-+	__u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME)			\
-+	((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
-+	(((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME)			\
-+	((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
-+	(((PTR)->LEN_NAME) = (VAL))
-+
-+#elif defined(CONFIG_SWIOTLB)
-+
-+#define pci_dac_dma_supported(pci_dev, mask)    1
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
-+	dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
-+	__u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME)			\
-+	((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
-+	(((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME)			\
-+	((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
-+	(((PTR)->LEN_NAME) = (VAL))
-+
-+#else
-+/* No IOMMU */
-+
-+#define pci_dac_dma_supported(pci_dev, mask)    1
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-+#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
-+#define pci_unmap_len(PTR, LEN_NAME)		(0)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
-+
-+#endif
-+
-+#include <asm-generic/pci-dma-compat.h>
-+
-+static inline dma64_addr_t
-+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-+{
-+	return ((dma64_addr_t) page_to_phys(page) +
-+		(dma64_addr_t) offset);
-+}
-+
-+static inline struct page *
-+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+	return virt_to_page(__va(dma_addr)); 	
-+}
-+
-+static inline unsigned long
-+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+	return (dma_addr & ~PAGE_MASK);
-+}
-+
-+static inline void
-+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+}
-+
-+static inline void
-+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+	flush_write_buffers();
-+}
-+
-+#ifdef CONFIG_PCI
-+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-+					enum pci_dma_burst_strategy *strat,
-+					unsigned long *strategy_parameter)
-+{
-+	*strat = PCI_DMA_BURST_INFINITY;
-+	*strategy_parameter = ~0UL;
-+}
-+#endif
-+
-+#define HAVE_PCI_MMAP
-+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-+			       enum pci_mmap_state mmap_state, int write_combine);
-+
-+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-+{
-+}
-+
-+#endif /* __KERNEL__ */
-+
-+/* generic pci stuff */
-+#ifdef CONFIG_PCI
-+#include <asm-generic/pci.h>
-+#endif
-+
-+/* On Xen we have to scan all functions since Xen hides bridges from
-+ * us.  If a bridge is at fn=0 and that slot has a multifunction
-+ * device, we won't find the additional devices without scanning all
-+ * functions. */
-+#undef pcibios_scan_all_fns
-+#define pcibios_scan_all_fns(a, b)	1
-+
-+#endif /* __x8664_PCI_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/pgalloc.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/pgalloc.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/pgalloc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/pgalloc.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,226 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/pgalloc.h b/include/asm-x86_64/mach-xen/asm/pgalloc.h
+--- a/include/asm-x86_64/mach-xen/asm/pgalloc.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/pgalloc.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,224 @@
 +#ifndef _X86_64_PGALLOC_H
 +#define _X86_64_PGALLOC_H
 +
@@ -82127,18 +76746,6 @@
 +	set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
 +}
 +
-+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
-+{
-+	if (unlikely((mm)->context.pinned)) {
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			       (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
-+			       pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
-+		set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
-+	} else {
-+		*(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
-+	}
-+}
-+
 +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 +{
 +	if (unlikely((mm)->context.pinned)) {
@@ -82171,6 +76778,18 @@
 +	}
 +}
 +
++static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
++{
++	if (unlikely((mm)->context.pinned)) {
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			       (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
++			       pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
++		set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
++	} else {
++		*(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
++	}
++}
++
 +static inline void pmd_free(pmd_t *pmd)
 +{
 +	pte_t *ptep = virt_to_ptep(pmd);
@@ -82184,19 +76803,17 @@
 +	free_page((unsigned long)pmd);
 +}
 +
-+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
++static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
 +{
-+        pmd_t *pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-+        return pmd;
++	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 +}
 +
 +static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 +{
-+        pud_t *pud = (pud_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-+        return pud;
++	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 +}
 +
-+static inline void pud_free(pud_t *pud)
++static inline void pud_free (pud_t *pud)
 +{
 +	pte_t *ptep = virt_to_ptep(pud);
 +
@@ -82331,10 +76948,10 @@
 +#define __pud_free_tlb(tlb,x)   pud_free((x))
 +
 +#endif /* _X86_64_PGALLOC_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/pgtable.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/pgtable.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/pgtable.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/pgtable.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,558 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/pgtable.h b/include/asm-x86_64/mach-xen/asm/pgtable.h
+--- a/include/asm-x86_64/mach-xen/asm/pgtable.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/pgtable.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,553 @@
 +#ifndef _X86_64_PGTABLE_H
 +#define _X86_64_PGTABLE_H
 +
@@ -82381,12 +76998,9 @@
 +
 +#define swapper_pg_dir init_level4_pgt
 +
-+extern int nonx_setup(char *str);
 +extern void paging_init(void);
 +extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
 +
-+extern unsigned long pgkern_mask;
-+
 +/*
 + * ZERO_PAGE is a global shared page that is always zero: used
 + * for zero-mapped memory areas etc..
@@ -82433,22 +77047,27 @@
 +#define set_pte_batched(pteptr, pteval) \
 +	queue_l1_entry_update(pteptr, (pteval))
 +
-+extern inline int pud_present(pud_t pud)	{ return !pud_none(pud); }
-+
 +static inline void set_pte(pte_t *dst, pte_t val)
 +{
 +	*dst = val;
 +}
++#define set_pte_at(_mm,addr,ptep,pteval) do {				\
++	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
++	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
++		set_pte((ptep), (pteval));				\
++} while (0)
 +
 +#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
++
 +#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
-+#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
 +
-+static inline void pud_clear (pud_t * pud)
++static inline void pud_clear (pud_t *pud)
 +{
 +	set_pud(pud, __pud(0));
 +}
 +
++#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
++
 +#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
 +
 +static inline void pgd_clear (pgd_t * pgd)
@@ -82457,9 +77076,6 @@
 +        set_pgd(__user_pgd(pgd), __pgd(0));
 +}
 +
-+#define pud_page(pud) \
-+    ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
-+
 +/*
 + * A note on implementation of this atomic 'get-and-clear' operation.
 + * This is actually very simple because Xen Linux can only run on a single
@@ -82607,41 +77223,32 @@
 +
 +static inline unsigned long pgd_bad(pgd_t pgd)
 +{
-+       unsigned long val = pgd_val(pgd);
-+       val &= ~PTE_MASK;
-+       val &= ~(_PAGE_USER | _PAGE_DIRTY);
-+       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
++	return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
 +}
 +
-+static inline unsigned long pud_bad(pud_t pud) 
-+{ 
-+       unsigned long val = pud_val(pud);
-+       val &= ~PTE_MASK; 
-+       val &= ~(_PAGE_USER | _PAGE_DIRTY); 
-+       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);      
-+} 
++static inline unsigned long pud_bad(pud_t pud)
++{
++	return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
++}
 +
-+#define set_pte_at(_mm,addr,ptep,pteval) do {				\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
-+		set_pte((ptep), (pteval));				\
-+} while (0)
++static inline unsigned long pmd_bad(pmd_t pmd)
++{
++	return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
++}
 +
 +#define pte_none(x)	(!(x).pte)
 +#define pte_present(x)	((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
 +#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 +
-+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-+
-+#define pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
-+#define pte_pfn(_pte) mfn_to_local_pfn(pte_mfn(_pte))
-+
++#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))	/* FIXME: is this
++						   right? */
 +#define pte_page(x)	pfn_to_page(pte_pfn(x))
++#define pte_pfn(x) mfn_to_local_pfn(pte_mfn(x))
++#define pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
 +
 +static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 +{
 +	pte_t pte;
-+        
 +	(pte).pte = (pfn_to_mfn(page_nr) << PAGE_SHIFT);
 +	(pte).pte |= pgprot_val(pgprot);
 +	(pte).pte &= __supported_pte_mask;
@@ -82657,7 +77264,7 @@
 +#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
 +static inline int pte_user(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
 +static inline int pte_read(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
-+static inline int pte_exec(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_exec(pte_t pte)		{ return !(__pte_val(pte) & _PAGE_NX); }
 +static inline int pte_dirty(pte_t pte)		{ return __pte_val(pte) & _PAGE_DIRTY; }
 +static inline int pte_young(pte_t pte)		{ return __pte_val(pte) & _PAGE_ACCESSED; }
 +static inline int pte_write(pte_t pte)		{ return __pte_val(pte) & _PAGE_RW; }
@@ -82670,11 +77277,12 @@
 +static inline pte_t pte_mkold(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
 +static inline pte_t pte_wrprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_RW; return pte; }
 +static inline pte_t pte_mkread(pte_t pte)	{ __pte_val(pte) |= _PAGE_USER; return pte; }
-+static inline pte_t pte_mkexec(pte_t pte)	{ __pte_val(pte) |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkexec(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_NX; return pte; }
 +static inline pte_t pte_mkdirty(pte_t pte)	{ __pte_val(pte) |= _PAGE_DIRTY; return pte; }
 +static inline pte_t pte_mkyoung(pte_t pte)	{ __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 +static inline pte_t pte_mkwrite(pte_t pte)	{ __pte_val(pte) |= _PAGE_RW; return pte; }
 +static inline pte_t pte_mkhuge(pte_t pte)	{ __pte_val(pte) |= _PAGE_PSE; return pte; }
++static inline pte_t pte_clrhuge(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_PSE; return pte; }
 +
 +struct vm_area_struct;
 +
@@ -82722,7 +77330,8 @@
 + * Level 4 access.
 + * Never use these in the common code.
 + */
-+#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
++#define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
++#define pgd_page(pgd)		(pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
 +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 +#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
 +#define pgd_offset_k(address) (pgd_t *)(init_level4_pgt + pgd_index(address))
@@ -82731,8 +77340,11 @@
 +
 +/* PUD - Level3 access */
 +/* to find an entry in a page-table-directory. */
++#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
++#define pud_page(pud)		(pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
 +#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-+#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
++#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
++extern inline int pud_present(pud_t pud)	{ return !pud_none(pud); }
 +
 +/* Find correct pud via the hidden fourth level page level: */
 +
@@ -82744,18 +77356,17 @@
 +}
 +
 +/* PMD  - Level 2 access */
-+#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
++#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
 +#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 +
 +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-+#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
-+                                  pmd_index(address))
++#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
++                        pmd_index(address))
 +#define pmd_none(x)	(!pmd_val(x))
 +/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
 +   can temporarily clear it. */
 +#define pmd_present(x)	(pmd_val(x))
 +#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
-+#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
 +#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
 +#define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 +
@@ -82773,7 +77384,8 @@
 +static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 +{ 
 +	pte_t pte;
-+	(pte).pte = physpage | pgprot_val(pgprot); 
++	pte.pte = physpage | pgprot_val(pgprot); 
++	pte.pte &= __supported_pte_mask;
 +	return pte; 
 +}
 + 
@@ -82788,7 +77400,7 @@
 +
 +#define pte_index(address) \
 +		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-+#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
++#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
 +			pte_index(address))
 +
 +/* x86-64 always has all page tables mapped. */
@@ -82893,10 +77505,10 @@
 +#include <asm-generic/pgtable.h>
 +
 +#endif /* _X86_64_PGTABLE_H */
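
Among the real fixes in this hunk: pte_exec() and pte_mkexec() now operate on _PAGE_NX instead of _PAGE_USER. With NX paging, "executable" means the no-execute bit is clear; testing _PAGE_USER conflated user accessibility with executability. What the corrected helpers compute, in isolation (bit value per the x86-64 PTE format; names here are illustrative stand-ins):

	#define EX_PAGE_NX (1UL << 63)	/* no-execute: top bit of a 64-bit PTE */

	static inline int example_pte_exec(unsigned long pteval)
	{
		return !(pteval & EX_PAGE_NX);	/* executable iff NX is clear */
	}

	static inline unsigned long example_pte_mkexec(unsigned long pteval)
	{
		return pteval & ~EX_PAGE_NX;	/* clear NX to permit execution */
	}
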
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/processor.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/processor.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/processor.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/processor.h	2006-12-05 18:42:37.000000000 +0100
-@@ -0,0 +1,503 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/processor.h b/include/asm-x86_64/mach-xen/asm/processor.h
+--- a/include/asm-x86_64/mach-xen/asm/processor.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/processor.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,507 @@
 +/*
 + * include/asm-x86_64/processor.h
 + *
@@ -83067,12 +77679,6 @@
 +
 +
 +/*
-+ * Bus types
-+ */
-+#define MCA_bus 0
-+#define MCA_bus__is_a_macro
-+
-+/*
 + * User space process size. 47bits minus one guard page.
 + */
 +#define TASK_SIZE64	(0x800000000000UL - 4096)
@@ -83384,6 +77990,16 @@
 +		: :"a" (eax), "c" (ecx));
 +}
 +
++static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
++{
++	/* "mwait %eax,%ecx;" */
++	asm volatile(
++		"sti; .byte 0x0f,0x01,0xc9;"
++		: :"a" (eax), "c" (ecx));
++}
++
++extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
++
 +#define stack_current() \
 +({								\
 +	struct thread_info *ti;					\
@@ -83400,166 +78016,28 @@
 +#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
 +
 +#endif /* __ASM_X86_64_PROCESSOR_H */
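
The new __sti_mwait() pairs STI with an immediately following MWAIT (again raw opcode bytes, 0f 01 c9), so the one-instruction STI interrupt shadow covers the gap: a wakeup arriving while interrupts were disabled still terminates the MWAIT rather than being lost between enabling interrupts and halting. The idle sequence built from it looks roughly like this (simplified sketch, assuming entry with interrupts disabled; the real mwait_idle_with_hints() differs in detail):

	static void example_mwait_idle(void)
	{
		if (!need_resched()) {
			__monitor(&current_thread_info()->flags, 0, 0);	/* arm the monitor */
			smp_mb();
			if (!need_resched())
				__sti_mwait(0, 0);	/* enable IRQs and halt atomically */
			else
				local_irq_enable();
		} else
			local_irq_enable();
	}
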
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/ptrace.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/ptrace.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/ptrace.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/ptrace.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,125 @@
-+#ifndef _X86_64_PTRACE_H
-+#define _X86_64_PTRACE_H
-+
-+#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS) 
-+#define R15 0
-+#define R14 8
-+#define R13 16
-+#define R12 24
-+#define RBP 32
-+#define RBX 40
-+/* arguments: interrupts/non-tracing syscalls only save up to here */
-+#define R11 48
-+#define R10 56	
-+#define R9 64
-+#define R8 72
-+#define RAX 80
-+#define RCX 88
-+#define RDX 96
-+#define RSI 104
-+#define RDI 112
-+#define ORIG_RAX 120       /* = ERROR */ 
-+/* end of arguments */ 	
-+/* cpu exception frame or undefined in case of fast syscall. */
-+#define RIP 128
-+#define CS 136
-+#define EFLAGS 144
-+#define RSP 152
-+#define SS 160
-+#define ARGOFFSET R11
-+#endif /* __ASSEMBLY__ */
-+
-+/* top of stack page */ 
-+#define FRAME_SIZE 168
-+
-+#define PTRACE_OLDSETOPTIONS         21
-+
-+#ifndef __ASSEMBLY__ 
-+
-+struct pt_regs {
-+	unsigned long r15;
-+	unsigned long r14;
-+	unsigned long r13;
-+	unsigned long r12;
-+	unsigned long rbp;
-+	unsigned long rbx;
-+/* arguments: non-interrupt/non-tracing syscalls only save up to here */
-+ 	unsigned long r11;
-+	unsigned long r10;	
-+	unsigned long r9;
-+	unsigned long r8;
-+	unsigned long rax;
-+	unsigned long rcx;
-+	unsigned long rdx;
-+	unsigned long rsi;
-+	unsigned long rdi;
-+	unsigned long orig_rax;
-+/* end of arguments */ 	
-+/* cpu exception frame or undefined */
-+	unsigned long rip;
-+	unsigned long cs;
-+	unsigned long eflags; 
-+	unsigned long rsp; 
-+	unsigned long ss;
-+/* top of stack page */ 
-+};
-+
-+#endif
-+
-+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-+#define PTRACE_GETREGS            12
-+#define PTRACE_SETREGS            13
-+#define PTRACE_GETFPREGS          14
-+#define PTRACE_SETFPREGS          15
-+#define PTRACE_GETFPXREGS         18
-+#define PTRACE_SETFPXREGS         19
-+
-+/* only useful for access 32bit programs */
-+#define PTRACE_GET_THREAD_AREA    25
-+#define PTRACE_SET_THREAD_AREA    26
-+
-+#define PTRACE_ARCH_PRCTL	  30	/* arch_prctl for child */
-+
-+#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 
-+#define user_mode(regs) (!!((regs)->cs & 3))
-+#define user_mode_vm(regs) user_mode(regs)
-+#define instruction_pointer(regs) ((regs)->rip)
-+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
-+extern unsigned long profile_pc(struct pt_regs *regs);
-+#else
-+#define profile_pc(regs) instruction_pointer(regs)
-+#endif
-+
-+void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
-+
-+struct task_struct;
-+
-+extern unsigned long
-+convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
-+
-+enum {
-+        EF_CF   = 0x00000001,
-+        EF_PF   = 0x00000004,
-+        EF_AF   = 0x00000010,
-+        EF_ZF   = 0x00000040,
-+        EF_SF   = 0x00000080,
-+        EF_TF   = 0x00000100,
-+        EF_IE   = 0x00000200,
-+        EF_DF   = 0x00000400,
-+        EF_OF   = 0x00000800,
-+        EF_IOPL = 0x00003000,
-+        EF_IOPL_RING0 = 0x00000000,
-+        EF_IOPL_RING1 = 0x00001000,
-+        EF_IOPL_RING2 = 0x00002000,
-+        EF_NT   = 0x00004000,   /* nested task */
-+        EF_RF   = 0x00010000,   /* resume */
-+        EF_VM   = 0x00020000,   /* virtual mode */
-+        EF_AC   = 0x00040000,   /* alignment */
-+        EF_VIF  = 0x00080000,   /* virtual interrupt */
-+        EF_VIP  = 0x00100000,   /* virtual interrupt pending */
-+        EF_ID   = 0x00200000,   /* id */
-+};
-+
-+#endif
-+
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/smp.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/smp.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/smp.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/smp.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,150 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/smp.h b/include/asm-x86_64/mach-xen/asm/smp.h
+--- a/include/asm-x86_64/mach-xen/asm/smp.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/smp.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,119 @@
 +#ifndef __ASM_SMP_H
 +#define __ASM_SMP_H
 +
 +/*
 + * We need the APIC definitions automatically as part of 'smp.h'
 + */
-+#ifndef __ASSEMBLY__
 +#include <linux/threads.h>
 +#include <linux/cpumask.h>
 +#include <linux/bitops.h>
 +extern int disable_apic;
-+#endif
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#ifndef __ASSEMBLY__
 +#include <asm/fixmap.h>
 +#include <asm/mpspec.h>
-+#ifdef CONFIG_X86_IO_APIC
 +#include <asm/io_apic.h>
-+#endif
 +#include <asm/apic.h>
 +#include <asm/thread_info.h>
-+#endif
-+#endif
 +
 +#ifdef CONFIG_SMP
-+#ifndef ASSEMBLY
 +
 +#include <asm/pda.h>
 +
@@ -83576,7 +78054,6 @@
 + 
 +extern void smp_alloc_memory(void);
 +extern volatile unsigned long smp_invalidate_needed;
-+extern int pic_mode;
 +extern void lock_ipi_call_lock(void);
 +extern void unlock_ipi_call_lock(void);
 +extern int smp_num_siblings;
@@ -83604,28 +78081,22 @@
 +
 +#define raw_smp_processor_id() read_pda(cpunumber)
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
 +static inline int hard_smp_processor_id(void)
 +{
 +	/* we don't want to mark this access volatile - bad code generation */
 +	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
 +}
-+#endif
 +
-+extern int safe_smp_processor_id(void);
 +extern int __cpu_disable(void);
 +extern void __cpu_die(unsigned int cpu);
 +extern void prefill_possible_map(void);
 +extern unsigned num_processors;
 +extern unsigned disabled_cpus;
 +
-+#endif /* !ASSEMBLY */
-+
 +#define NO_PROC_ID		0xFF		/* No processor magic marker */
 +
 +#endif
 +
-+#ifndef ASSEMBLY
 +/*
 + * Some lowlevel functions might want to know about
 + * the real APIC ID <-> CPU # mapping.
@@ -83634,12 +78105,6 @@
 +extern u8 x86_cpu_to_log_apicid[NR_CPUS];
 +extern u8 bios_cpu_apicid[];
 +
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
-+{
-+	return cpus_addr(cpumask)[0];
-+}
-+
 +static inline int cpu_present_to_apicid(int mps_cpu)
 +{
 +	if (mps_cpu < NR_CPUS)
@@ -83647,13 +78112,9 @@
 +	else
 +		return BAD_APICID;
 +}
-+#endif
-+
-+#endif /* !ASSEMBLY */
 +
 +#ifndef CONFIG_SMP
 +#define stack_smp_processor_id() 0
-+#define safe_smp_processor_id() 0
 +#define cpu_logical_map(x) (x)
 +#else
 +#include <asm/thread_info.h>
@@ -83665,39 +78126,35 @@
 +})
 +#endif
 +
-+#ifndef __ASSEMBLY__
-+#ifdef CONFIG_X86_LOCAL_APIC
 +static __inline int logical_smp_processor_id(void)
 +{
 +	/* we don't want to mark this access volatile - bad code generation */
 +	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
 +}
-+#endif
-+#endif
 +
 +#ifdef CONFIG_SMP
 +#define cpu_physical_id(cpu)		x86_cpu_to_apicid[cpu]
 +#else
 +#define cpu_physical_id(cpu)		boot_cpu_id
++#endif /* !CONFIG_SMP */
 +#endif
 +
-+#endif
-+
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/synch_bitops.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/synch_bitops.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/synch_bitops.h	2006-11-19 14:27:11.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/synch_bitops.h b/include/asm-x86_64/mach-xen/asm/synch_bitops.h
+--- a/include/asm-x86_64/mach-xen/asm/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/synch_bitops.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,2 @@
 +
 +#include <asm-i386/mach-xen/asm/synch_bitops.h>
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/system.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/system.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/system.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/system.h	2006-12-05 18:42:37.000000000 +0100
-@@ -0,0 +1,264 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/system.h b/include/asm-x86_64/mach-xen/asm/system.h
+--- a/include/asm-x86_64/mach-xen/asm/system.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/system.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,266 @@
 +#ifndef __ASM_SYSTEM_H
 +#define __ASM_SYSTEM_H
 +
 +#include <linux/kernel.h>
 +#include <asm/segment.h>
++#include <asm/alternative.h>
 +#include <asm/synch_bitops.h>
 +#include <asm/hypervisor.h>
 +#include <xen/interface/arch-x86_64.h>
@@ -83717,12 +78174,13 @@
 +#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
 +
 +/* frame pointer must be last for get_wchan */
-+#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"
++#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
++#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
 +
 +#define __EXTRA_CLOBBER  \
 +	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
 +
++/* Save and restore flags so a leaked NT flag is cleared across the task switch */
 +#define switch_to(prev,next,last) \
 +	asm volatile(SAVE_CONTEXT						    \
 +		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
@@ -83745,8 +78203,7 @@
 +		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))   \
 +		     : "memory", "cc" __EXTRA_CLOBBER)
 +    
-+
-+extern void load_gs_index(unsigned);
++extern void load_gs_index(unsigned);
 +
 +/*
 + * Load a segment. Fall back on loading the zero
@@ -83785,11 +78242,12 @@
 +	asm volatile("movq %0,%%cr0" :: "r" (val));
 +} 
 +
-+#define read_cr3() ({ \
-+	unsigned long __dummy; \
-+	asm("movq %%cr3,%0" : "=r" (__dummy)); \
-+	machine_to_phys(__dummy); \
-+})
++static inline unsigned long read_cr3(void)
++{ 
++	unsigned long cr3;
++	asm("movq %%cr3,%0" : "=r" (cr3));
++	return machine_to_phys(cr3);
++} 
 +
 +static inline unsigned long read_cr4(void)
 +{ 
@@ -83957,9 +78415,9 @@
 +extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 +
 +#endif
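
Two hunks above warrant a note. SAVE_CONTEXT/RESTORE_CONTEXT now wrap the switch in pushf/popf, so EFLAGS — in particular a leaked NT bit — is restored from the incoming task instead of surviving from the outgoing one. Separately, read_cr3() filters %cr3 through machine_to_phys(): under Xen the register holds a machine frame address, while the rest of the kernel reasons in pseudo-physical addresses. A hypothetical caller, sketched only to show why the translation matters:

    static int pgd_is_loaded(pgd_t *pgd)
    {
    	/* __pa() yields a pseudo-physical address; without the
    	 * machine_to_phys() inside read_cr3() the two values could
    	 * never compare equal under Xen. */
    	return read_cr3() == __pa(pgd);
    }
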
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/timer.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/timer.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/timer.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/timer.h	2006-11-19 14:27:11.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/timer.h b/include/asm-x86_64/mach-xen/asm/timer.h
+--- a/include/asm-x86_64/mach-xen/asm/timer.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/timer.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,67 @@
 +#ifndef _ASMi386_TIMER_H
 +#define _ASMi386_TIMER_H
@@ -84028,28 +78486,45 @@
 +extern struct init_timer_opts timer_pmtmr_init;
 +#endif
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/tlbflush.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/tlbflush.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/tlbflush.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/tlbflush.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,103 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/tlbflush.h b/include/asm-x86_64/mach-xen/asm/tlbflush.h
+--- a/include/asm-x86_64/mach-xen/asm/tlbflush.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/tlbflush.h	2007-03-14 10:55:49.000000000 +0100
+@@ -0,0 +1,120 @@
 +#ifndef _X8664_TLBFLUSH_H
 +#define _X8664_TLBFLUSH_H
 +
 +#include <linux/mm.h>
 +#include <asm/processor.h>
 +
-+#define __flush_tlb()	xen_tlb_flush()
++static inline unsigned long get_cr3(void)
++{
++	unsigned long cr3;
++	asm volatile("mov %%cr3,%0" : "=r" (cr3));
++	return machine_to_phys(cr3);
++}
 +
-+/*
-+ * Global pages have to be flushed a bit differently. Not a real
-+ * performance problem because this does not happen often.
-+ */
-+#define __flush_tlb_global()	xen_tlb_flush()
++static inline void set_cr3(unsigned long cr3)
++{
++	BUG();
++	/* What the hell is this supposed to do: JQ */
++	asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory");
++}
++
++#define __flush_tlb()	xen_tlb_flush()
 +
++static inline unsigned long get_cr4(void)
++{
++	unsigned long cr4;
++	asm volatile("mov %%cr4,%0" : "=r" (cr4));
++	return cr4;
++}
 +
-+extern unsigned long pgkern_mask;
++static inline void set_cr4(unsigned long cr4)
++{
++	asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
++}
 +
-+#define __flush_tlb_all() __flush_tlb_global()
++#define __flush_tlb_all() xen_tlb_flush()
 +
 +#define __flush_tlb_one(addr)	xen_invlpg((unsigned long)addr)
 +
@@ -84135,33 +78610,9 @@
 +}
 +
 +#endif /* _X8664_TLBFLUSH_H */
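
Of the accessors above, get_cr3() translates like read_cr3() in system.h, and set_cr3() is deliberately BUG()ed out — a paravirtualized guest cannot load %cr3 directly and must go through the hypervisor. The CR4 pair is a plain register round-trip; a usage sketch, with X86_CR4_TSD assumed from processor.h:

    static void example_trap_user_rdtsc(void)
    {
    	/* Read-modify-write through the accessors; setting TSD makes
    	 * RDTSC privileged, so ring-3 use traps. */
    	set_cr4(get_cr4() | X86_CR4_TSD);
    }
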
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/vga.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/vga.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/vga.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/vga.h	2006-11-19 14:27:11.000000000 +0100
-@@ -0,0 +1,20 @@
-+/*
-+ *	Access to VGA videoram
-+ *
-+ *	(c) 1998 Martin Mares <mj at ucw.cz>
-+ */
-+
-+#ifndef _LINUX_ASM_VGA_H_
-+#define _LINUX_ASM_VGA_H_
-+
-+/*
-+ *	On the PC, we can just recalculate addresses and then
-+ *	access the videoram directly without any black magic.
-+ */
-+
-+#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
-+
-+#define vga_readb(x) (*(x))
-+#define vga_writeb(x,y) (*(y) = (x))
-+
-+#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/xor.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/xor.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/asm/xor.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/asm/xor.h	2006-11-19 14:27:11.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/asm/xor.h b/include/asm-x86_64/mach-xen/asm/xor.h
+--- a/include/asm-x86_64/mach-xen/asm/xor.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/asm/xor.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,328 @@
 +/*
 + * x86-64 changes / gcc fixes from Andi Kleen. 
@@ -84179,7 +78630,7 @@
 +	preempt_disable();			\
 +	if (!(current_thread_info()->status & TS_USEDFPU))	\
 +		clts();				\
-+	__asm__ __volatile__ ( 			\
++	asm volatile (				\
 +		"movups %%xmm0,(%1)	;\n\t"	\
 +		"movups %%xmm1,0x10(%1)	;\n\t"	\
 +		"movups %%xmm2,0x20(%1)	;\n\t"	\
@@ -84491,9 +78942,9 @@
 +   We may also be able to load into the L1 only depending on how the cpu
 +   deals with a load to a line that is being prefetched.  */
 +#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/irq_vectors.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/irq_vectors.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/irq_vectors.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/irq_vectors.h	2006-11-19 14:27:11.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/irq_vectors.h b/include/asm-x86_64/mach-xen/irq_vectors.h
+--- a/include/asm-x86_64/mach-xen/irq_vectors.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/irq_vectors.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,123 @@
 +/*
 + * This file should contain #defines for all of the interrupt vector
@@ -84618,9 +79069,9 @@
 +#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
 +
 +#endif /* _ASM_IRQ_VECTORS_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/mach_time.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/mach_time.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/mach_time.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/mach_time.h	2006-11-19 14:27:11.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/mach_time.h b/include/asm-x86_64/mach-xen/mach_time.h
+--- a/include/asm-x86_64/mach-xen/mach_time.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/mach_time.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,111 @@
 +/*
 + *  include/asm-i386/mach-default/mach_time.h
@@ -84733,9 +79184,9 @@
 +}
 +
 +#endif /* !_MACH_TIME_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/mach_timer.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/mach_timer.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/mach_timer.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/mach_timer.h	2006-11-19 14:27:11.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/mach_timer.h b/include/asm-x86_64/mach-xen/mach_timer.h
+--- a/include/asm-x86_64/mach-xen/mach_timer.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/mach_timer.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,48 @@
 +/*
 + *  include/asm-i386/mach-default/mach_timer.h
@@ -84785,9 +79236,9 @@
 +}
 +
 +#endif /* !_MACH_TIMER_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/setup_arch_post.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/setup_arch_post.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/setup_arch_post.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/setup_arch_post.h	2006-11-19 14:27:11.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/setup_arch_post.h b/include/asm-x86_64/mach-xen/setup_arch_post.h
+--- a/include/asm-x86_64/mach-xen/setup_arch_post.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/setup_arch_post.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,58 @@
 +/**
 + * machine_specific_* - Hooks for machine specific setup.
@@ -84847,113 +79298,143 @@
 +	}
 +#endif
 +}
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/asm-x86_64/mach-xen/setup_arch_pre.h linux-2.6.18-xen/include/asm-x86_64/mach-xen/setup_arch_pre.h
---- linux-2.6.18.3/include/asm-x86_64/mach-xen/setup_arch_pre.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/asm-x86_64/mach-xen/setup_arch_pre.h	2006-11-19 14:27:11.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mach-xen/setup_arch_pre.h b/include/asm-x86_64/mach-xen/setup_arch_pre.h
+--- a/include/asm-x86_64/mach-xen/setup_arch_pre.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/asm-x86_64/mach-xen/setup_arch_pre.h	2007-03-14 10:55:49.000000000 +0100
 @@ -0,0 +1,5 @@
 +/* Hook to call BIOS initialisation function */
 +
 +#define ARCH_SETUP machine_specific_arch_setup();
 +
 +static void __init machine_specific_arch_setup(void);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/linux/elfnote.h linux-2.6.18-xen/include/linux/elfnote.h
---- linux-2.6.18.3/include/linux/elfnote.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/linux/elfnote.h	2006-11-19 14:27:14.000000000 +0100
-@@ -0,0 +1,90 @@
-+#ifndef _LINUX_ELFNOTE_H
-+#define _LINUX_ELFNOTE_H
-+/*
-+ * Helper macros to generate ELF Note structures, which are put into a
-+ * PT_NOTE segment of the final vmlinux image.  These are useful for
-+ * including name-value pairs of metadata into the kernel binary (or
-+ * modules?) for use by external programs.
-+ *
-+ * Each note has three parts: a name, a type and a desc.  The name is
-+ * intended to distinguish the note's originator, so it would be a
-+ * company, project, subsystem, etc; it must be in a suitable form for
-+ * use in a section name.  The type is an integer which is used to tag
-+ * the data, and is considered to be within the "name" namespace (so
-+ * "FooCo"'s type 42 is distinct from "BarProj"'s type 42).  The
-+ * "desc" field is the actual data.  There are no constraints on the
-+ * desc field's contents, though typically they're fairly small.
-+ *
-+ * All notes from a given NAME are put into a section named
-+ * .note.NAME.  When the kernel image is finally linked, all the notes
-+ * are packed into a single .notes section, which is mapped into the
-+ * PT_NOTE segment.  Because notes for a given name are grouped into
-+ * the same section, they'll all be adjacent the output file.
-+ *
-+ * This file defines macros for both C and assembler use.  Their
-+ * syntax is slightly different, but they're semantically similar.
-+ *
-+ * See the ELF specification for more detail about ELF notes.
-+ */
-+
-+#ifdef __ASSEMBLER__
-+/*
-+ * Generate a structure with the same shape as Elf{32,64}_Nhdr (which
-+ * turn out to be the same size and shape), followed by the name and
-+ * desc data with appropriate padding.  The 'desctype' argument is the
-+ * assembler pseudo op defining the type of the data e.g. .asciz while
-+ * 'descdata' is the data itself e.g.  "hello, world".
-+ *
-+ * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
-+ *      ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
-+ */
-+#define ELFNOTE(name, type, desctype, descdata)	\
-+.pushsection .note.name			;	\
-+  .align 4				;	\
-+  .long 2f - 1f		/* namesz */	;	\
-+  .long 4f - 3f		/* descsz */	;	\
-+  .long type				;	\
-+1:.asciz "name"				;	\
-+2:.align 4				;	\
-+3:desctype descdata			;	\
-+4:.align 4				;	\
-+.popsection				;
-+#else	/* !__ASSEMBLER__ */
-+#include <linux/elf.h>
-+/*
-+ * Use an anonymous structure which matches the shape of
-+ * Elf{32,64}_Nhdr, but includes the name and desc data.  The size and
-+ * type of name and desc depend on the macro arguments.  "name" must
-+ * be a literal string, and "desc" must be passed by value.  You may
-+ * only define one note per line, since __LINE__ is used to generate
-+ * unique symbols.
-+ */
-+#define _ELFNOTE_PASTE(a,b)	a##b
-+#define _ELFNOTE(size, name, unique, type, desc)			\
-+	static const struct {						\
-+		struct elf##size##_note _nhdr;				\
-+		unsigned char _name[sizeof(name)]			\
-+		__attribute__((aligned(sizeof(Elf##size##_Word))));	\
-+		typeof(desc) _desc					\
-+			     __attribute__((aligned(sizeof(Elf##size##_Word)))); \
-+	} _ELFNOTE_PASTE(_note_, unique)				\
-+		__attribute_used__					\
-+		__attribute__((section(".note." name),			\
-+			       aligned(sizeof(Elf##size##_Word)),	\
-+			       unused)) = {				\
-+		{							\
-+			sizeof(name),					\
-+			sizeof(desc),					\
-+			type,						\
-+		},							\
-+		name,							\
-+		desc							\
-+	}
-+#define ELFNOTE(size, name, type, desc)		\
-+	_ELFNOTE(size, name, __LINE__, type, desc)
-+
-+#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
-+#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
-+#endif	/* __ASSEMBLER__ */
-+
-+#endif /* _LINUX_ELFNOTE_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/linux/gfp.h linux-2.6.18-xen/include/linux/gfp.h
---- linux-2.6.18.3/include/linux/gfp.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/linux/gfp.h	2006-11-19 14:27:14.000000000 +0100
-@@ -99,7 +99,11 @@
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/mmu.h b/include/asm-x86_64/mmu.h
+--- a/include/asm-x86_64/mmu.h	2007-03-12 21:58:16.000000000 +0100
++++ b/include/asm-x86_64/mmu.h	2007-03-14 10:55:49.000000000 +0100
+@@ -15,6 +15,24 @@
+ 	rwlock_t ldtlock; 
+ 	int size;
+ 	struct semaphore sem; 
++#ifdef CONFIG_XEN
++	unsigned pinned:1;
++	unsigned has_foreign_mappings:1;
++	struct list_head unpinned;
++#endif
+ } mm_context_t;
+ 
++#ifdef CONFIG_XEN
++extern struct list_head mm_unpinned;
++extern spinlock_t mm_unpinned_lock;
++
++/* mm/memory.c:exit_mmap hook */
++extern void _arch_exit_mmap(struct mm_struct *mm);
++#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
++
++/* kernel/fork.c:dup_mmap hook */
++extern void _arch_dup_mmap(struct mm_struct *mm);
++#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
++#endif
++
+ #endif
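
The arch_exit_mmap()/arch_dup_mmap() macros above rely on the generic call sites in mm/memory.c and kernel/fork.c compiling to no-ops when an architecture does not provide them. The expected shape at the exit_mmap() end, sketched under that assumption (exit_mmap_shape() is illustrative only):

    #ifndef arch_exit_mmap
    #define arch_exit_mmap(mm) do { } while (0)
    #endif

    static void exit_mmap_shape(struct mm_struct *mm)
    {
    	arch_exit_mmap(mm);	/* Xen: unpin the page tables first */
    	/* ... then unmap and free the address space as usual ... */
    }
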
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
+--- a/include/asm-x86_64/nmi.h	2007-03-16 18:49:42.000000000 +0100
++++ b/include/asm-x86_64/nmi.h	2007-03-14 10:55:49.000000000 +0100
+@@ -7,6 +7,10 @@
+ #include <linux/pm.h>
+ #include <asm/io.h>
+  
++#ifdef CONFIG_XEN
++#include <xen/interface/nmi.h>
++#endif
++
+ /**
+  * do_nmi_callback
+  *
+@@ -39,7 +43,25 @@
+ extern void default_do_nmi(struct pt_regs *);
+ extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
+ 
++#ifdef CONFIG_XEN
++static inline unsigned char get_nmi_reason(void)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++	unsigned char reason = 0;
++
++	/* Construct a value that looks as if it had been read from
++	 * port 0x61. */
++	if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
++		reason |= 0x40;
++	if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
++		reason |= 0x80;
++
++	return reason;
++}
++#else
+ #define get_nmi_reason() inb(0x61)
++#endif
+ 
+ extern int panic_on_timeout;
+ extern int unknown_nmi_panic;
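
The Xen get_nmi_reason() synthesizes the ISA system-control-port-B byte: bit 7 is the memory-parity flag and bit 6 the I/O-check flag, which is exactly what the generic NMI path tests. A sketch of the consumer side; mem_parity_error() and io_check_error() are assumed to be the usual traps.c handlers:

    static void example_unknown_nmi(struct pt_regs *regs)
    {
    	unsigned char reason = get_nmi_reason();

    	if (reason & 0x80)	/* memory parity error */
    		mem_parity_error(reason, regs);
    	if (reason & 0x40)	/* I/O check error */
    		io_check_error(reason, regs);
    }
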
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
+--- a/include/asm-x86_64/pci.h	2007-03-12 21:58:16.000000000 +0100
++++ b/include/asm-x86_64/pci.h	2007-03-14 10:55:49.000000000 +0100
+@@ -75,6 +75,23 @@
+ #define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
+ 	(((PTR)->LEN_NAME) = (VAL))
+ 
++#elif defined(CONFIG_SWIOTLB)
++
++#define pci_dac_dma_supported(pci_dev, mask)    1
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
++	dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
++	__u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME)			\
++	((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
++	(((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME)			\
++	((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
++	(((PTR)->LEN_NAME) = (VAL))
++
+ #else
+ /* No IOMMU */
+ 
+@@ -146,4 +163,13 @@
+ #include <asm-generic/pci.h>
+ #endif
+ 
++#ifdef CONFIG_XEN
++/* On Xen we have to scan all functions since Xen hides bridges from
++ * us.  If a bridge is at fn=0 and that slot has a multifunction
++ * device, we won't find the additional devices without scanning all
++ * functions. */
++#undef pcibios_scan_all_fns
++#define pcibios_scan_all_fns(a, b)	1
++#endif
++
+ #endif /* __x8664_PCI_H */
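
Forcing pcibios_scan_all_fns() to 1 changes the early-exit test in the generic slot scan. Roughly, the probe loop has this shape — a sketch only, where slot_is_multifunction() and probe_function() stand in for the header-type check and device probe in drivers/pci/probe.c:

    static void scan_slot_shape(struct pci_bus *bus, int devfn)
    {
    	int fn;

    	for (fn = 0; fn < 8; fn++) {
    		/* Native: stop after fn 0 unless the device declares
    		 * itself multifunction.  Xen: every function is
    		 * probed, since the bridge at fn 0 may be hidden. */
    		if (fn && !pcibios_scan_all_fns(bus, devfn + fn) &&
    		    !slot_is_multifunction(bus, devfn))
    			break;
    		probe_function(bus, devfn + fn);
    	}
    }
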
+diff -x '.hg*' -x '.git*' -urN a/include/asm-x86_64/vga.h b/include/asm-x86_64/vga.h
+--- a/include/asm-x86_64/vga.h	2007-03-12 21:58:16.000000000 +0100
++++ b/include/asm-x86_64/vga.h	2007-03-14 10:55:49.000000000 +0100
+@@ -12,7 +12,11 @@
+  *	access the videoram directly without any black magic.
+  */
+ 
++#ifdef CONFIG_XEN
++#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
++#else
+ #define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x)
++#endif
+ 
+ #define vga_readb(x) (*(x))
+ #define vga_writeb(x,y) (*(y) = (x))
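
Under Xen the legacy VGA hole is not identity-mapped, so VGA_MAP_MEM() has to go through the ISA remapping rather than phys_to_virt(); callers are unchanged. A sketch of the classic text-buffer mapping (the addresses are the conventional ones, shown for illustration):

    static void example_map_text_buffer(void)
    {
    	/* 0xb8000 is the colour text-mode buffer; either branch of
    	 * VGA_MAP_MEM() returns a normal kernel virtual address. */
    	u8 *vga = (u8 *)VGA_MAP_MEM(0xb8000, 0x8000);

    	vga_writeb('X', vga);	/* top-left character cell */
    }
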
+diff -x '.hg*' -x '.git*' -urN a/include/linux/gfp.h b/include/linux/gfp.h
+--- a/include/linux/gfp.h	2007-03-16 18:49:43.000000000 +0100
++++ b/include/linux/gfp.h	2007-03-14 10:55:50.000000000 +0100
+@@ -114,7 +114,11 @@
   */
  
  #ifndef HAVE_ARCH_FREE_PAGE
@@ -84964,15 +79445,15 @@
 + */
 +static inline int arch_free_page(struct page *page, int order) { return 0; }
  #endif
- 
- extern struct page *
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/linux/highmem.h linux-2.6.18-xen/include/linux/highmem.h
---- linux-2.6.18.3/include/linux/highmem.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/linux/highmem.h	2006-11-19 14:27:14.000000000 +0100
-@@ -24,10 +24,16 @@
- 
+ #ifndef HAVE_ARCH_ALLOC_PAGE
+ static inline void arch_alloc_page(struct page *page, int order) { }
+diff -x '.hg*' -x '.git*' -urN a/include/linux/highmem.h b/include/linux/highmem.h
+--- a/include/linux/highmem.h	2007-03-16 18:49:43.000000000 +0100
++++ b/include/linux/highmem.h	2007-03-14 10:55:50.000000000 +0100
+@@ -26,10 +26,16 @@
  /* declarations for linux/mm/highmem.c */
  unsigned int nr_free_highpages(void);
+ extern unsigned long totalhigh_pages;
 +#ifdef CONFIG_XEN
 +void kmap_flush_unused(void);
 +#endif
@@ -84984,12 +79465,12 @@
 +static inline void kmap_flush_unused(void) { }
 +#endif
  
- static inline void *kmap(struct page *page)
- {
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/linux/interrupt.h linux-2.6.18-xen/include/linux/interrupt.h
---- linux-2.6.18.3/include/linux/interrupt.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/linux/interrupt.h	2006-11-19 14:27:14.000000000 +0100
-@@ -166,6 +166,12 @@
+ #define totalhigh_pages 0
+ 
+diff -x '.hg*' -x '.git*' -urN a/include/linux/interrupt.h b/include/linux/interrupt.h
+--- a/include/linux/interrupt.h	2007-03-16 18:49:43.000000000 +0100
++++ b/include/linux/interrupt.h	2007-03-14 10:55:50.000000000 +0100
+@@ -184,6 +184,12 @@
  
  #endif /* CONFIG_GENERIC_HARDIRQS */
  
@@ -85002,20 +79483,20 @@
  #ifndef __ARCH_SET_SOFTIRQ_PENDING
  #define set_softirq_pending(x) (local_softirq_pending() = (x))
  #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/linux/mm.h linux-2.6.18-xen/include/linux/mm.h
---- linux-2.6.18.3/include/linux/mm.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/linux/mm.h	2006-11-19 14:27:14.000000000 +0100
-@@ -164,6 +164,9 @@
- #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
+diff -x '.hg*' -x '.git*' -urN a/include/linux/mm.h b/include/linux/mm.h
+--- a/include/linux/mm.h	2007-03-16 18:49:43.000000000 +0100
++++ b/include/linux/mm.h	2007-03-14 10:55:50.000000000 +0100
+@@ -169,6 +169,9 @@
  #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
  #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
+ #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
 +#ifdef CONFIG_XEN
-+#define VM_FOREIGN	0x04000000	/* Has pages belonging to another VM */
++#define VM_FOREIGN	0x08000000	/* Has pages belonging to another VM */
 +#endif
  
  #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
  #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
-@@ -1027,6 +1030,13 @@
+@@ -1129,6 +1132,13 @@
  #define FOLL_GET	0x04	/* do get_page on page */
  #define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */
  
@@ -85029,9 +79510,9 @@
  #ifdef CONFIG_PROC_FS
  void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
  #else
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/linux/oprofile.h linux-2.6.18-xen/include/linux/oprofile.h
---- linux-2.6.18.3/include/linux/oprofile.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/linux/oprofile.h	2006-11-19 14:27:14.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/linux/oprofile.h b/include/linux/oprofile.h
+--- a/include/linux/oprofile.h	2007-03-12 21:58:16.000000000 +0100
++++ b/include/linux/oprofile.h	2007-03-14 10:55:50.000000000 +0100
 @@ -16,6 +16,10 @@
  #include <linux/types.h>
  #include <linux/spinlock.h>
@@ -85064,9 +79545,9 @@
  
  /**
   * Create a file of the given name as a child of the given root, with
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/linux/pfn.h linux-2.6.18-xen/include/linux/pfn.h
---- linux-2.6.18.3/include/linux/pfn.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/linux/pfn.h	2006-12-05 18:42:37.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/linux/pfn.h b/include/linux/pfn.h
+--- a/include/linux/pfn.h	2007-03-12 21:58:16.000000000 +0100
++++ b/include/linux/pfn.h	2007-03-14 10:55:50.000000000 +0100
 @@ -4,6 +4,10 @@
  #define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
  #define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
@@ -85078,10 +79559,10 @@
 +#endif
  
  #endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/linux/skbuff.h linux-2.6.18-xen/include/linux/skbuff.h
---- linux-2.6.18.3/include/linux/skbuff.h	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/include/linux/skbuff.h	2006-11-19 14:27:15.000000000 +0100
-@@ -203,6 +203,8 @@
+diff -x '.hg*' -x '.git*' -urN a/include/linux/skbuff.h b/include/linux/skbuff.h
+--- a/include/linux/skbuff.h	2007-03-16 18:49:44.000000000 +0100
++++ b/include/linux/skbuff.h	2007-03-14 10:55:51.000000000 +0100
+@@ -202,6 +202,8 @@
   *	@local_df: allow local fragmentation
   *	@cloned: Head may be cloned (check refcnt to be sure)
   *	@nohdr: Payload reference only, must not modify header
@@ -85090,7 +79571,7 @@
   *	@pkt_type: Packet class
   *	@fclone: skbuff clone status
   *	@ip_summed: Driver fed us an IP checksum
-@@ -282,7 +284,13 @@
+@@ -284,7 +286,13 @@
  				nfctinfo:3;
  	__u8			pkt_type:3,
  				fclone:2,
@@ -85104,9 +79585,9 @@
  	__be16			protocol;
  
  	void			(*destructor)(struct sk_buff *skb);
-@@ -345,7 +353,8 @@
+@@ -347,7 +355,8 @@
  
- extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+ extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
  					    unsigned int size,
 -					    gfp_t priority);
 +					    gfp_t priority,
@@ -85114,7 +79595,7 @@
  extern void	       kfree_skbmem(struct sk_buff *skb);
  extern struct sk_buff *skb_clone(struct sk_buff *skb,
  				 gfp_t priority);
-@@ -1086,6 +1095,7 @@
+@@ -1088,6 +1097,7 @@
  		kfree_skb(skb);
  }
  
@@ -85122,7 +79603,7 @@
  /**
   *	__dev_alloc_skb - allocate an skbuff for receiving
   *	@length: length to allocate
-@@ -1106,6 +1116,9 @@
+@@ -1108,6 +1118,9 @@
  		skb_reserve(skb, NET_SKB_PAD);
  	return skb;
  }
@@ -85132,9 +79613,9 @@
  
  /**
   *	dev_alloc_skb - allocate an skbuff for receiving
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/balloon.h linux-2.6.18-xen/include/xen/balloon.h
---- linux-2.6.18.3/include/xen/balloon.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/balloon.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/balloon.h b/include/xen/balloon.h
+--- a/include/xen/balloon.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/balloon.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,57 @@
 +/******************************************************************************
 + * balloon.h
@@ -85193,9 +79674,9 @@
 +#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
 +
 +#endif /* __ASM_BALLOON_H__ */
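
balloon_lock()/balloon_unlock() serialize every path that may change the phys-to-machine mapping underneath the kernel. A hypothetical caller, sketched only to show the locking shape:

    static void example_exchange_memory(void)
    {
    	unsigned long flags;

    	balloon_lock(flags);
    	/* ... issue XENMEM_* hypercalls here, while the
    	 * phys-to-machine table is guaranteed stable ... */
    	balloon_unlock(flags);
    }
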
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/cpu_hotplug.h linux-2.6.18-xen/include/xen/cpu_hotplug.h
---- linux-2.6.18.3/include/xen/cpu_hotplug.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/cpu_hotplug.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/cpu_hotplug.h b/include/xen/cpu_hotplug.h
+--- a/include/xen/cpu_hotplug.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/cpu_hotplug.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,43 @@
 +#ifndef __XEN_CPU_HOTPLUG_H__
 +#define __XEN_CPU_HOTPLUG_H__
@@ -85240,9 +79721,9 @@
 +#endif /* !defined(CONFIG_HOTPLUG_CPU) */
 +
 +#endif /* __XEN_CPU_HOTPLUG_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/driver_util.h linux-2.6.18-xen/include/xen/driver_util.h
---- linux-2.6.18.3/include/xen/driver_util.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/driver_util.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/driver_util.h b/include/xen/driver_util.h
+--- a/include/xen/driver_util.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/driver_util.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,15 @@
 +
 +#ifndef __ASM_XEN_DRIVER_UTIL_H__
@@ -85259,9 +79740,9 @@
 +extern void unlock_vm_area(struct vm_struct *area);
 +
 +#endif /* __ASM_XEN_DRIVER_UTIL_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/evtchn.h linux-2.6.18-xen/include/xen/evtchn.h
---- linux-2.6.18.3/include/xen/evtchn.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/evtchn.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/evtchn.h b/include/xen/evtchn.h
+--- a/include/xen/evtchn.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/evtchn.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,113 @@
 +/******************************************************************************
 + * evtchn.h
@@ -85320,21 +79801,21 @@
 + */
 +extern int bind_evtchn_to_irqhandler(
 +	unsigned int evtchn,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	irq_handler_t handler,
 +	unsigned long irqflags,
 +	const char *devname,
 +	void *dev_id);
 +extern int bind_virq_to_irqhandler(
 +	unsigned int virq,
 +	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	irq_handler_t handler,
 +	unsigned long irqflags,
 +	const char *devname,
 +	void *dev_id);
 +extern int bind_ipi_to_irqhandler(
 +	unsigned int ipi,
 +	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	irq_handler_t handler,
 +	unsigned long irqflags,
 +	const char *devname,
 +	void *dev_id);
@@ -85376,9 +79857,9 @@
 +extern void notify_remote_via_irq(int irq);
 +
 +#endif /* __ASM_EVTCHN_H__ */
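
The switch to irq_handler_t above tracks the 2.6.19 change that dropped the pt_regs argument from interrupt handlers. What a caller looks like afterwards — a sketch assuming VIRQ_TIMER from the Xen interface headers and IRQF_DISABLED from linux/interrupt.h:

    static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
    {
    	/* ... per-CPU timer work ... */
    	return IRQ_HANDLED;
    }

    static int example_bind_timer(unsigned int cpu)
    {
    	return bind_virq_to_irqhandler(VIRQ_TIMER, cpu,
    				       example_timer_interrupt,
    				       IRQF_DISABLED, "timer", NULL);
    }
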
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/features.h linux-2.6.18-xen/include/xen/features.h
---- linux-2.6.18.3/include/xen/features.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/features.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/features.h b/include/xen/features.h
+--- a/include/xen/features.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/features.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,20 @@
 +/******************************************************************************
 + * features.h
@@ -85400,9 +79881,9 @@
 +#define xen_feature(flag)	(xen_features[flag])
 +
 +#endif /* __ASM_XEN_FEATURES_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/foreign_page.h linux-2.6.18-xen/include/xen/foreign_page.h
---- linux-2.6.18.3/include/xen/foreign_page.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/foreign_page.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/foreign_page.h b/include/xen/foreign_page.h
+--- a/include/xen/foreign_page.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/foreign_page.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,30 @@
 +/******************************************************************************
 + * foreign_page.h
@@ -85434,9 +79915,9 @@
 +	( (void (*) (struct page *)) (page)->mapping )
 +
 +#endif /* __ASM_XEN_FOREIGN_PAGE_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/gnttab.h linux-2.6.18-xen/include/xen/gnttab.h
---- linux-2.6.18.3/include/xen/gnttab.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/gnttab.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/gnttab.h b/include/xen/gnttab.h
+--- a/include/xen/gnttab.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/gnttab.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,150 @@
 +/******************************************************************************
 + * gnttab.h
@@ -85588,9 +80069,9 @@
 +}
 +
 +#endif /* __ASM_GNTTAB_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/hvm.h linux-2.6.18-xen/include/xen/hvm.h
---- linux-2.6.18.3/include/xen/hvm.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/hvm.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/hvm.h b/include/xen/hvm.h
+--- a/include/xen/hvm.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/hvm.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,24 @@
 +/* Simple wrappers around HVM functions */
 +#ifndef XEN_HVM_H__
@@ -85616,9 +80097,9 @@
 +}
 +
 +#endif /* XEN_HVM_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/hypervisor_sysfs.h linux-2.6.18-xen/include/xen/hypervisor_sysfs.h
---- linux-2.6.18.3/include/xen/hypervisor_sysfs.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/hypervisor_sysfs.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/hypervisor_sysfs.h b/include/xen/hypervisor_sysfs.h
+--- a/include/xen/hypervisor_sysfs.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/hypervisor_sysfs.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,32 @@
 +/*
 + *  copyright (c) 2006 IBM Corporation
@@ -85652,9 +80133,9 @@
 +};
 +
 +#endif /* _HYP_SYSFS_H_ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/acm.h linux-2.6.18-xen/include/xen/interface/acm.h
---- linux-2.6.18.3/include/xen/interface/acm.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/acm.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/acm.h b/include/xen/interface/acm.h
+--- a/include/xen/interface/acm.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/acm.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,187 @@
 +/*
 + * acm.h: Xen access control module interface defintions
@@ -85843,9 +80324,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/acm_ops.h linux-2.6.18-xen/include/xen/interface/acm_ops.h
---- linux-2.6.18.3/include/xen/interface/acm_ops.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/acm_ops.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/acm_ops.h b/include/xen/interface/acm_ops.h
+--- a/include/xen/interface/acm_ops.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/acm_ops.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,102 @@
 +/*
 + * acm_ops.h: Xen access control module hypervisor commands
@@ -85949,9 +80430,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/arch-ia64.h linux-2.6.18-xen/include/xen/interface/arch-ia64.h
---- linux-2.6.18.3/include/xen/interface/arch-ia64.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/arch-ia64.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/arch-ia64.h b/include/xen/interface/arch-ia64.h
+--- a/include/xen/interface/arch-ia64.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/arch-ia64.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,448 @@
 +/******************************************************************************
 + * arch-ia64/hypervisor-if.h
@@ -86401,9 +80882,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/arch-powerpc.h linux-2.6.18-xen/include/xen/interface/arch-powerpc.h
---- linux-2.6.18.3/include/xen/interface/arch-powerpc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/arch-powerpc.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/arch-powerpc.h b/include/xen/interface/arch-powerpc.h
+--- a/include/xen/interface/arch-powerpc.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/arch-powerpc.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,115 @@
 +/*
 + * This program is free software; you can redistribute it and/or modify
@@ -86520,9 +81001,9 @@
 +#endif
 +
 +#endif
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/arch-x86_32.h linux-2.6.18-xen/include/xen/interface/arch-x86_32.h
---- linux-2.6.18.3/include/xen/interface/arch-x86_32.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/arch-x86_32.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/arch-x86_32.h b/include/xen/interface/arch-x86_32.h
+--- a/include/xen/interface/arch-x86_32.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/arch-x86_32.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,255 @@
 +/******************************************************************************
 + * arch-x86_32.h
@@ -86779,9 +81260,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/arch-x86_64.h linux-2.6.18-xen/include/xen/interface/arch-x86_64.h
---- linux-2.6.18.3/include/xen/interface/arch-x86_64.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/arch-x86_64.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/arch-x86_64.h b/include/xen/interface/arch-x86_64.h
+--- a/include/xen/interface/arch-x86_64.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/arch-x86_64.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,322 @@
 +/******************************************************************************
 + * arch-x86_64.h
@@ -87105,9 +81586,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/callback.h linux-2.6.18-xen/include/xen/interface/callback.h
---- linux-2.6.18.3/include/xen/interface/callback.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/callback.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/callback.h b/include/xen/interface/callback.h
+--- a/include/xen/interface/callback.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/callback.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,74 @@
 +/******************************************************************************
 + * callback.h
@@ -87183,9 +81664,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/dom0_ops.h linux-2.6.18-xen/include/xen/interface/dom0_ops.h
---- linux-2.6.18.3/include/xen/interface/dom0_ops.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/dom0_ops.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/dom0_ops.h b/include/xen/interface/dom0_ops.h
+--- a/include/xen/interface/dom0_ops.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/dom0_ops.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,102 @@
 +/******************************************************************************
 + * dom0_ops.h
@@ -87289,9 +81770,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/domctl.h linux-2.6.18-xen/include/xen/interface/domctl.h
---- linux-2.6.18.3/include/xen/interface/domctl.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/domctl.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/domctl.h b/include/xen/interface/domctl.h
+--- a/include/xen/interface/domctl.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/domctl.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,392 @@
 +/******************************************************************************
 + * domctl.h
@@ -87685,9 +82166,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/elfnote.h linux-2.6.18-xen/include/xen/interface/elfnote.h
---- linux-2.6.18.3/include/xen/interface/elfnote.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/elfnote.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
+--- a/include/xen/interface/elfnote.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/elfnote.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,133 @@
 +/******************************************************************************
 + * elfnote.h
@@ -87822,9 +82303,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/event_channel.h linux-2.6.18-xen/include/xen/interface/event_channel.h
---- linux-2.6.18.3/include/xen/interface/event_channel.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/event_channel.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/event_channel.h b/include/xen/interface/event_channel.h
+--- a/include/xen/interface/event_channel.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/event_channel.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,233 @@
 +/******************************************************************************
 + * event_channel.h
@@ -88059,9 +82540,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/features.h linux-2.6.18-xen/include/xen/interface/features.h
---- linux-2.6.18.3/include/xen/interface/features.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/features.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/features.h b/include/xen/interface/features.h
+--- a/include/xen/interface/features.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/features.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,53 @@
 +/******************************************************************************
 + * features.h
@@ -88116,9 +82597,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/grant_table.h linux-2.6.18-xen/include/xen/interface/grant_table.h
---- linux-2.6.18.3/include/xen/interface/grant_table.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/grant_table.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
+--- a/include/xen/interface/grant_table.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/grant_table.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,362 @@
 +/******************************************************************************
 + * grant_table.h
@@ -88482,9 +82963,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/hvm/e820.h linux-2.6.18-xen/include/xen/interface/hvm/e820.h
---- linux-2.6.18.3/include/xen/interface/hvm/e820.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/hvm/e820.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/hvm/e820.h b/include/xen/interface/hvm/e820.h
+--- a/include/xen/interface/hvm/e820.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/hvm/e820.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,32 @@
 +#ifndef __XEN_PUBLIC_HVM_E820_H__
 +#define __XEN_PUBLIC_HVM_E820_H__
@@ -88518,9 +82999,9 @@
 +#define HVM_BELOW_4G_MMIO_LENGTH    ((1ULL << 32) - HVM_BELOW_4G_MMIO_START)
 +
 +#endif /* __XEN_PUBLIC_HVM_E820_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/hvm/hvm_info_table.h linux-2.6.18-xen/include/xen/interface/hvm/hvm_info_table.h
---- linux-2.6.18.3/include/xen/interface/hvm/hvm_info_table.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/hvm/hvm_info_table.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/hvm/hvm_info_table.h b/include/xen/interface/hvm/hvm_info_table.h
+--- a/include/xen/interface/hvm/hvm_info_table.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/hvm/hvm_info_table.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,22 @@
 +/******************************************************************************
 + * hvm/hvm_info_table.h
@@ -88544,9 +83025,9 @@
 +};
 +
 +#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/hvm/ioreq.h linux-2.6.18-xen/include/xen/interface/hvm/ioreq.h
---- linux-2.6.18.3/include/xen/interface/hvm/ioreq.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/hvm/ioreq.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/hvm/ioreq.h b/include/xen/interface/hvm/ioreq.h
+--- a/include/xen/interface/hvm/ioreq.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/hvm/ioreq.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,99 @@
 +/*
 + * ioreq.h: I/O request definitions for device models
@@ -88647,9 +83128,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/hvm/params.h linux-2.6.18-xen/include/xen/interface/hvm/params.h
---- linux-2.6.18.3/include/xen/interface/hvm/params.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/hvm/params.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/hvm/params.h b/include/xen/interface/hvm/params.h
+--- a/include/xen/interface/hvm/params.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/hvm/params.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,24 @@
 +#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
 +#define __XEN_PUBLIC_HVM_PARAMS_H__
@@ -88675,9 +83156,9 @@
 +DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
 +
 +#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/hvm/vmx_assist.h linux-2.6.18-xen/include/xen/interface/hvm/vmx_assist.h
---- linux-2.6.18.3/include/xen/interface/hvm/vmx_assist.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/hvm/vmx_assist.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/hvm/vmx_assist.h b/include/xen/interface/hvm/vmx_assist.h
+--- a/include/xen/interface/hvm/vmx_assist.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/hvm/vmx_assist.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,98 @@
 +/*
 + * vmx_assist.h: Context definitions for the VMXASSIST world switch.
@@ -88777,9 +83258,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/io/blkif.h linux-2.6.18-xen/include/xen/interface/io/blkif.h
---- linux-2.6.18.3/include/xen/interface/io/blkif.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/io/blkif.h	2006-11-19 14:27:15.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
+--- a/include/xen/interface/io/blkif.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/io/blkif.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,87 @@
 +/******************************************************************************
 + * blkif.h
@@ -88868,9 +83349,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/io/console.h linux-2.6.18-xen/include/xen/interface/io/console.h
---- linux-2.6.18.3/include/xen/interface/io/console.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/io/console.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/io/console.h b/include/xen/interface/io/console.h
+--- a/include/xen/interface/io/console.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/io/console.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,33 @@
 +/******************************************************************************
 + * console.h
@@ -88905,9 +83386,241 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/io/netif.h linux-2.6.18-xen/include/xen/interface/io/netif.h
---- linux-2.6.18.3/include/xen/interface/io/netif.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/io/netif.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/io/fbif.h b/include/xen/interface/io/fbif.h
+--- a/include/xen/interface/io/fbif.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/io/fbif.h	2007-03-14 10:55:52.000000000 +0100
+@@ -0,0 +1,116 @@
++/*
++ * fbif.h -- Xen virtual frame buffer device
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori at us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru at redhat.com>
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License. See the file COPYING in the main directory of this archive for
++ *  more details.
++ */
++
++#ifndef __XEN_PUBLIC_IO_FBIF_H__
++#define __XEN_PUBLIC_IO_FBIF_H__
++
++#include <asm/types.h>
++
++/* Out events (frontend -> backend) */
++
++/*
++ * Out events may be sent only when requested by backend, and receipt
++ * of an unknown out event is an error.
++ */
++
++/* Event type 1 currently not used */
++/*
++ * Framebuffer update notification event
++ * Capable frontend sets feature-update in xenstore.
++ * Backend requests it by setting request-update in xenstore.
++ */
++#define XENFB_TYPE_UPDATE 2
++
++struct xenfb_update
++{
++	__u8 type;		/* XENFB_TYPE_UPDATE */
++	__s32 x;		/* source x */
++	__s32 y;		/* source y */
++	__s32 width;		/* rect width */
++	__s32 height;		/* rect height */
++};
++
++#define XENFB_OUT_EVENT_SIZE 40
++
++union xenfb_out_event
++{
++	__u8 type;
++	struct xenfb_update update;
++	char pad[XENFB_OUT_EVENT_SIZE];
++};
++
++/* In events (backend -> frontend) */
++
++/*
++ * Frontends should ignore unknown in events.
++ * No in events currently defined.
++ */
++
++#define XENFB_IN_EVENT_SIZE 40
++
++union xenfb_in_event
++{
++	__u8 type;
++	char pad[XENFB_IN_EVENT_SIZE];
++};
++
++/* shared page */
++
++#define XENFB_IN_RING_SIZE 1024
++#define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
++#define XENFB_IN_RING_OFFS 1024
++#define XENFB_IN_RING(page) \
++    ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
++#define XENFB_IN_RING_REF(page, idx) \
++    (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
++
++#define XENFB_OUT_RING_SIZE 2048
++#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
++#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
++#define XENFB_OUT_RING(page) \
++    ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
++#define XENFB_OUT_RING_REF(page, idx) \
++    (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
++
++struct xenfb_page
++{
++	__u32 in_cons, in_prod;
++	__u32 out_cons, out_prod;
++
++	__s32 width;         /* the width of the framebuffer (in pixels) */
++	__s32 height;        /* the height of the framebuffer (in pixels) */
++	__u32 line_length;   /* the length of a row of pixels (in bytes) */
++	__u32 mem_length;    /* the length of the framebuffer (in bytes) */
++	__u8 depth;          /* the depth of a pixel (in bits) */
++
++	/*
++	 * Framebuffer page directory
++	 *
++	 * Each directory page holds PAGE_SIZE / sizeof(*pd)
++	 * framebuffer pages, and can thus map up to PAGE_SIZE *
++	 * PAGE_SIZE / sizeof(*pd) bytes.  With PAGE_SIZE == 4096 and
++	 * sizeof(unsigned long) == 4, that's 4 Megs.  Two directory
++	 * pages should be enough for a while.
++	 */
++	unsigned long pd[2];
++};
++
++/*
++ * Wart: xenkbd needs to know resolution.  Put it here until a better
++ * solution is found, but don't leak it to the backend.
++ */
++#ifdef __KERNEL__
++#define XENFB_WIDTH 800
++#define XENFB_HEIGHT 600
++#define XENFB_DEPTH 32
++#endif
++
++#endif
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h
+--- a/include/xen/interface/io/kbdif.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/io/kbdif.h	2007-03-14 10:55:52.000000000 +0100
+@@ -0,0 +1,108 @@
++/*
++ * kbdif.h -- Xen virtual keyboard/mouse
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori at us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru at redhat.com>
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License. See the file COPYING in the main directory of this archive for
++ *  more details.
++ */
++
++#ifndef __XEN_PUBLIC_IO_KBDIF_H__
++#define __XEN_PUBLIC_IO_KBDIF_H__
++
++#include <asm/types.h>
++
++/* In events (backend -> frontend) */
++
++/*
++ * Frontends should ignore unknown in events.
++ */
++
++/* Pointer movement event */
++#define XENKBD_TYPE_MOTION  1
++/* Event type 2 currently not used */
++/* Key event (includes pointer buttons) */
++#define XENKBD_TYPE_KEY     3
++/*
++ * Pointer position event
++ * Capable backend sets feature-abs-pointer in xenstore.
++ * Frontend requests it instead of XENKBD_TYPE_MOTION by setting
++ * request-abs-update in xenstore.
++ */
++#define XENKBD_TYPE_POS     4
++
++struct xenkbd_motion
++{
++	__u8 type;         /* XENKBD_TYPE_MOTION */
++	__s32 rel_x;       /* relative X motion */
++	__s32 rel_y;       /* relative Y motion */
++};
++
++struct xenkbd_key
++{
++	__u8 type;         /* XENKBD_TYPE_KEY */
++	__u8 pressed;      /* 1 if pressed; 0 otherwise */
++	__u32 keycode;     /* KEY_* from linux/input.h */
++};
++
++struct xenkbd_position
++{
++	__u8 type;         /* XENKBD_TYPE_POS */
++	__s32 abs_x;       /* absolute X position (in FB pixels) */
++	__s32 abs_y;       /* absolute Y position (in FB pixels) */
++};
++
++#define XENKBD_IN_EVENT_SIZE 40
++
++union xenkbd_in_event
++{
++	__u8 type;
++	struct xenkbd_motion motion;
++	struct xenkbd_key key;
++	struct xenkbd_position pos;
++	char pad[XENKBD_IN_EVENT_SIZE];
++};
++
++/* Out events (frontend -> backend) */
++
++/*
++ * Out events may be sent only when requested by backend, and receipt
++ * of an unknown out event is an error.
++ * No out events currently defined.
++ */
++
++#define XENKBD_OUT_EVENT_SIZE 40
++
++union xenkbd_out_event
++{
++	__u8 type;
++	char pad[XENKBD_OUT_EVENT_SIZE];
++};
++
++/* shared page */
++
++#define XENKBD_IN_RING_SIZE 2048
++#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
++#define XENKBD_IN_RING_OFFS 1024
++#define XENKBD_IN_RING(page) \
++    ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
++#define XENKBD_IN_RING_REF(page, idx) \
++    (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
++
++#define XENKBD_OUT_RING_SIZE 1024
++#define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
++#define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
++#define XENKBD_OUT_RING(page) \
++    ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
++#define XENKBD_OUT_RING_REF(page, idx) \
++    (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
++
++struct xenkbd_page
++{
++	__u32 in_cons, in_prod;
++	__u32 out_cons, out_prod;
++};
++
++#endif
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
+--- a/include/xen/interface/io/netif.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/io/netif.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,166 @@
 +/******************************************************************************
 + * netif.h
@@ -89075,9 +83788,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/io/pciif.h linux-2.6.18-xen/include/xen/interface/io/pciif.h
---- linux-2.6.18.3/include/xen/interface/io/pciif.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/io/pciif.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/io/pciif.h b/include/xen/interface/io/pciif.h
+--- a/include/xen/interface/io/pciif.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/io/pciif.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,55 @@
 +/*
 + * PCI Backend/Frontend Common Data Structures & Macros
@@ -89134,9 +83847,9 @@
 +};
 +
 +#endif /* __XEN_PCI_COMMON_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/io/ring.h linux-2.6.18-xen/include/xen/interface/io/ring.h
---- linux-2.6.18.3/include/xen/interface/io/ring.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/io/ring.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
+--- a/include/xen/interface/io/ring.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/io/ring.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,273 @@
 +/******************************************************************************
 + * ring.h
@@ -89411,9 +84124,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/io/tpmif.h linux-2.6.18-xen/include/xen/interface/io/tpmif.h
---- linux-2.6.18.3/include/xen/interface/io/tpmif.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/io/tpmif.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/io/tpmif.h b/include/xen/interface/io/tpmif.h
+--- a/include/xen/interface/io/tpmif.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/io/tpmif.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,59 @@
 +/******************************************************************************
 + * tpmif.h
@@ -89474,9 +84187,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/io/xenbus.h linux-2.6.18-xen/include/xen/interface/io/xenbus.h
---- linux-2.6.18.3/include/xen/interface/io/xenbus.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/io/xenbus.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/io/xenbus.h b/include/xen/interface/io/xenbus.h
+--- a/include/xen/interface/io/xenbus.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/io/xenbus.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,45 @@
 +/*****************************************************************************
 + * xenbus.h
@@ -89523,9 +84236,9 @@
 +typedef enum xenbus_state XenbusState;
 +
 +#endif /* _XEN_PUBLIC_IO_XENBUS_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/io/xs_wire.h linux-2.6.18-xen/include/xen/interface/io/xs_wire.h
---- linux-2.6.18.3/include/xen/interface/io/xs_wire.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/io/xs_wire.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
+--- a/include/xen/interface/io/xs_wire.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/io/xs_wire.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,97 @@
 +/*
 + * Details of the "wire" protocol between Xen Store Daemon and client
@@ -89624,9 +84337,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/memory.h linux-2.6.18-xen/include/xen/interface/memory.h
---- linux-2.6.18.3/include/xen/interface/memory.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/memory.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/memory.h b/include/xen/interface/memory.h
+--- a/include/xen/interface/memory.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/memory.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,243 @@
 +/******************************************************************************
 + * memory.h
@@ -89871,9 +84584,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/nmi.h linux-2.6.18-xen/include/xen/interface/nmi.h
---- linux-2.6.18.3/include/xen/interface/nmi.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/nmi.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
+--- a/include/xen/interface/nmi.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/nmi.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,60 @@
 +/******************************************************************************
 + * nmi.h
@@ -89935,9 +84648,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/physdev.h linux-2.6.18-xen/include/xen/interface/physdev.h
---- linux-2.6.18.3/include/xen/interface/physdev.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/physdev.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
+--- a/include/xen/interface/physdev.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/physdev.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,149 @@
 +
 +#ifndef __XEN_PUBLIC_PHYSDEV_H__
@@ -90088,9 +84801,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/platform.h linux-2.6.18-xen/include/xen/interface/platform.h
---- linux-2.6.18.3/include/xen/interface/platform.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/platform.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/platform.h b/include/xen/interface/platform.h
+--- a/include/xen/interface/platform.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/platform.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,125 @@
 +/******************************************************************************
 + * platform.h
@@ -90217,9 +84930,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/sched.h linux-2.6.18-xen/include/xen/interface/sched.h
---- linux-2.6.18.3/include/xen/interface/sched.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/sched.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/sched.h b/include/xen/interface/sched.h
+--- a/include/xen/interface/sched.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/sched.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,103 @@
 +/******************************************************************************
 + * sched.h
@@ -90324,9 +85037,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/sysctl.h linux-2.6.18-xen/include/xen/interface/sysctl.h
---- linux-2.6.18.3/include/xen/interface/sysctl.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/sysctl.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/sysctl.h b/include/xen/interface/sysctl.h
+--- a/include/xen/interface/sysctl.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/sysctl.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,151 @@
 +/******************************************************************************
 + * sysctl.h
@@ -90479,9 +85192,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/trace.h linux-2.6.18-xen/include/xen/interface/trace.h
---- linux-2.6.18.3/include/xen/interface/trace.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/trace.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/trace.h b/include/xen/interface/trace.h
+--- a/include/xen/interface/trace.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/trace.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,87 @@
 +/******************************************************************************
 + * include/public/trace.h
@@ -90570,9 +85283,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/vcpu.h linux-2.6.18-xen/include/xen/interface/vcpu.h
---- linux-2.6.18.3/include/xen/interface/vcpu.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/vcpu.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
+--- a/include/xen/interface/vcpu.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/vcpu.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,121 @@
 +/******************************************************************************
 + * vcpu.h
@@ -90695,9 +85408,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/version.h linux-2.6.18-xen/include/xen/interface/version.h
---- linux-2.6.18.3/include/xen/interface/version.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/version.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/version.h b/include/xen/interface/version.h
+--- a/include/xen/interface/version.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/version.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,73 @@
 +/******************************************************************************
 + * version.h
@@ -90772,9 +85485,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/xencomm.h linux-2.6.18-xen/include/xen/interface/xencomm.h
---- linux-2.6.18.3/include/xen/interface/xencomm.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/xencomm.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/xencomm.h b/include/xen/interface/xencomm.h
+--- a/include/xen/interface/xencomm.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/xencomm.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,37 @@
 +/*
 + * Copyright (C) 2006 Hollis Blanchard <hollisb at us.ibm.com>, IBM Corporation
@@ -90813,9 +85526,9 @@
 +};
 +
 +#endif /* _XEN_XENCOMM_H_ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/xen-compat.h linux-2.6.18-xen/include/xen/interface/xen-compat.h
---- linux-2.6.18.3/include/xen/interface/xen-compat.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/xen-compat.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/xen-compat.h b/include/xen/interface/xen-compat.h
+--- a/include/xen/interface/xen-compat.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/xen-compat.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,26 @@
 +/******************************************************************************
 + * xen-compat.h
@@ -90843,9 +85556,9 @@
 +#endif
 +
 +#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/xen.h linux-2.6.18-xen/include/xen/interface/xen.h
---- linux-2.6.18.3/include/xen/interface/xen.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/xen.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/xen.h b/include/xen/interface/xen.h
+--- a/include/xen/interface/xen.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/xen.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,581 @@
 +/******************************************************************************
 + * xen.h
@@ -91428,9 +86141,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/interface/xenoprof.h linux-2.6.18-xen/include/xen/interface/xenoprof.h
---- linux-2.6.18.3/include/xen/interface/xenoprof.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/interface/xenoprof.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/interface/xenoprof.h b/include/xen/interface/xenoprof.h
+--- a/include/xen/interface/xenoprof.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/interface/xenoprof.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,110 @@
 +/******************************************************************************
 + * xenoprof.h
@@ -91542,9 +86255,9 @@
 + * indent-tabs-mode: nil
 + * End:
 + */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/pcifront.h linux-2.6.18-xen/include/xen/pcifront.h
---- linux-2.6.18.3/include/xen/pcifront.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/pcifront.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/pcifront.h b/include/xen/pcifront.h
+--- a/include/xen/pcifront.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/pcifront.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,76 @@
 +/*
 + * PCI Frontend - arch-dependent declarations
@@ -91622,9 +86335,9 @@
 +#endif /* __KERNEL__ */
 +
 +#endif /* __XEN_ASM_PCIFRONT_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/public/evtchn.h linux-2.6.18-xen/include/xen/public/evtchn.h
---- linux-2.6.18.3/include/xen/public/evtchn.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/public/evtchn.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/public/evtchn.h b/include/xen/public/evtchn.h
+--- a/include/xen/public/evtchn.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/public/evtchn.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,88 @@
 +/******************************************************************************
 + * evtchn.h
@@ -91714,9 +86427,9 @@
 +	_IOC(_IOC_NONE, 'E', 5, 0)
 +
 +#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/public/privcmd.h linux-2.6.18-xen/include/xen/public/privcmd.h
---- linux-2.6.18.3/include/xen/public/privcmd.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/public/privcmd.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/public/privcmd.h b/include/xen/public/privcmd.h
+--- a/include/xen/public/privcmd.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/public/privcmd.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,79 @@
 +/******************************************************************************
 + * privcmd.h
@@ -91797,9 +86510,9 @@
 +	_IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
 +
 +#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/xenbus.h linux-2.6.18-xen/include/xen/xenbus.h
---- linux-2.6.18.3/include/xen/xenbus.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/xenbus.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/xenbus.h b/include/xen/xenbus.h
+--- a/include/xen/xenbus.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/xenbus.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,306 @@
 +/******************************************************************************
 + * xenbus.h
@@ -92107,9 +86820,9 @@
 +int xenbus_frontend_closed(struct xenbus_device *dev);
 +
 +#endif /* _XEN_XENBUS_H */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/xencons.h linux-2.6.18-xen/include/xen/xencons.h
---- linux-2.6.18.3/include/xen/xencons.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/xencons.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/xencons.h b/include/xen/xencons.h
+--- a/include/xen/xencons.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/xencons.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,17 @@
 +#ifndef __ASM_XENCONS_H__
 +#define __ASM_XENCONS_H__
@@ -92121,16 +86834,16 @@
 +void xencons_resume(void);
 +
 +/* Interrupt work hooks. Receive data, or kick data out. */
-+void xencons_rx(char *buf, unsigned len, struct pt_regs *regs);
++void xencons_rx(char *buf, unsigned len);
 +void xencons_tx(void);
 +
 +int xencons_ring_init(void);
 +int xencons_ring_send(const char *data, unsigned len);
 +
 +#endif /* __ASM_XENCONS_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/include/xen/xen_proc.h linux-2.6.18-xen/include/xen/xen_proc.h
---- linux-2.6.18.3/include/xen/xen_proc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/include/xen/xen_proc.h	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/include/xen/xen_proc.h b/include/xen/xen_proc.h
+--- a/include/xen/xen_proc.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/xen/xen_proc.h	2007-03-14 10:55:52.000000000 +0100
 @@ -0,0 +1,12 @@
 +
 +#ifndef __ASM_XEN_PROC_H__
@@ -92144,10 +86857,10 @@
 +	const char *name);
 +
 +#endif /* __ASM_XEN_PROC_H__ */
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/kernel/fork.c linux-2.6.18-xen/kernel/fork.c
---- linux-2.6.18.3/kernel/fork.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/kernel/fork.c	2006-11-19 14:27:16.000000000 +0100
-@@ -276,6 +276,9 @@
+diff -x '.hg*' -x '.git*' -urN a/kernel/fork.c b/kernel/fork.c
+--- a/kernel/fork.c	2007-03-16 18:49:45.000000000 +0100
++++ b/kernel/fork.c	2007-03-14 10:55:52.000000000 +0100
+@@ -286,6 +286,9 @@
  		if (retval)
  			goto out;
  	}
@@ -92157,10 +86870,10 @@
  	retval = 0;
  out:
  	up_write(&mm->mmap_sem);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/kernel/irq/manage.c linux-2.6.18-xen/kernel/irq/manage.c
---- linux-2.6.18.3/kernel/irq/manage.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/kernel/irq/manage.c	2006-12-05 18:42:37.000000000 +0100
-@@ -350,7 +350,6 @@
+diff -x '.hg*' -x '.git*' -urN a/kernel/irq/manage.c b/kernel/irq/manage.c
+--- a/kernel/irq/manage.c	2007-03-16 18:49:45.000000000 +0100
++++ b/kernel/irq/manage.c	2007-03-14 10:55:52.000000000 +0100
+@@ -358,7 +358,6 @@
  	struct irqaction **p;
  	unsigned long flags;
  
@@ -92168,9 +86881,9 @@
  	if (irq >= NR_IRQS)
  		return;
  
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/kernel/Kconfig.preempt linux-2.6.18-xen/kernel/Kconfig.preempt
---- linux-2.6.18.3/kernel/Kconfig.preempt	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/kernel/Kconfig.preempt	2006-11-19 14:27:16.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+--- a/kernel/Kconfig.preempt	2007-03-12 21:58:16.000000000 +0100
++++ b/kernel/Kconfig.preempt	2007-03-14 10:55:52.000000000 +0100
 @@ -35,6 +35,7 @@
  
  config PREEMPT
@@ -92179,35 +86892,23 @@
  	help
  	  This option reduces the latency of the kernel by making
  	  all kernel code (that is not executing in a critical section)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/lib/Kconfig.debug linux-2.6.18-xen/lib/Kconfig.debug
---- linux-2.6.18.3/lib/Kconfig.debug	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/lib/Kconfig.debug	2006-11-19 14:27:16.000000000 +0100
-@@ -325,7 +325,7 @@
- 
- config UNWIND_INFO
- 	bool "Compile the kernel with frame unwind information"
--	depends on !IA64 && !PARISC
-+	depends on !IA64 && !PARISC && !X86_64_XEN
- 	depends on !MODULES || !(MIPS || PPC || SUPERH || V850)
- 	help
- 	  If you say Y here the resulting kernel image will be slightly larger
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/lib/Makefile linux-2.6.18-xen/lib/Makefile
---- linux-2.6.18.3/lib/Makefile	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/lib/Makefile	2006-11-19 14:27:16.000000000 +0100
-@@ -52,6 +52,9 @@
- obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+diff -x '.hg*' -x '.git*' -urN a/lib/Makefile b/lib/Makefile
+--- a/lib/Makefile	2007-03-16 18:49:45.000000000 +0100
++++ b/lib/Makefile	2007-03-14 10:55:52.000000000 +0100
+@@ -57,6 +57,9 @@
  
  obj-$(CONFIG_SWIOTLB) += swiotlb.o
+ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 +ifneq ($(CONFIG_XEN_IA64_DOM0_NON_VP),y)
 +swiotlb-$(CONFIG_XEN) := ../arch/i386/kernel/swiotlb.o
 +endif
  
- hostprogs-y	:= gen_crc32table
- clean-files	:= crc32table.h
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/mm/highmem.c linux-2.6.18-xen/mm/highmem.c
---- linux-2.6.18.3/mm/highmem.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/mm/highmem.c	2006-11-19 14:27:16.000000000 +0100
-@@ -142,6 +142,17 @@
+ lib-$(CONFIG_GENERIC_BUG) += bug.o
+ 
+diff -x '.hg*' -x '.git*' -urN a/mm/highmem.c b/mm/highmem.c
+--- a/mm/highmem.c	2007-03-16 18:49:45.000000000 +0100
++++ b/mm/highmem.c	2007-03-14 10:55:52.000000000 +0100
+@@ -148,6 +148,17 @@
  	return vaddr;
  }
  
@@ -92225,18 +86926,10 @@
  void fastcall *kmap_high(struct page *page)
  {
  	unsigned long vaddr;
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/mm/Kconfig linux-2.6.18-xen/mm/Kconfig
---- linux-2.6.18.3/mm/Kconfig	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/mm/Kconfig	2006-11-19 14:27:16.000000000 +0100
-@@ -116,7 +116,6 @@
- config MEMORY_HOTPLUG
- 	bool "Allow for memory hot-add"
- 	depends on SPARSEMEM && HOTPLUG && !SOFTWARE_SUSPEND && ARCH_ENABLE_MEMORY_HOTPLUG
--	depends on (IA64 || X86 || PPC64)
- 
- comment "Memory hotplug is currently incompatible with Software Suspend"
- 	depends on SPARSEMEM && HOTPLUG && SOFTWARE_SUSPEND
-@@ -127,11 +126,14 @@
+diff -x '.hg*' -x '.git*' -urN a/mm/Kconfig b/mm/Kconfig
+--- a/mm/Kconfig	2007-03-16 18:49:45.000000000 +0100
++++ b/mm/Kconfig	2007-03-14 10:55:52.000000000 +0100
+@@ -132,11 +132,14 @@
  # Default to 4 for wider testing, though 8 might be more appropriate.
  # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
  # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
@@ -92251,10 +86944,10 @@
  	default "4"
  
  #
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/mm/memory.c linux-2.6.18-xen/mm/memory.c
---- linux-2.6.18.3/mm/memory.c	2006-12-06 09:06:09.000000000 +0100
-+++ linux-2.6.18-xen/mm/memory.c	2006-12-05 18:42:37.000000000 +0100
-@@ -403,7 +403,8 @@
+diff -x '.hg*' -x '.git*' -urN a/mm/memory.c b/mm/memory.c
+--- a/mm/memory.c	2007-03-16 18:49:45.000000000 +0100
++++ b/mm/memory.c	2007-03-14 10:55:53.000000000 +0100
+@@ -404,7 +404,8 @@
  	 * and that the resulting page looks ok.
  	 */
  	if (unlikely(!pfn_valid(pfn))) {
@@ -92264,7 +86957,7 @@
  		return NULL;
  	}
  
-@@ -891,6 +892,7 @@
+@@ -896,6 +897,7 @@
  		tlb_finish_mmu(tlb, address, end);
  	return end;
  }
@@ -92272,7 +86965,7 @@
  
  /*
   * Do a quick page-table lookup for a single page.
-@@ -1030,6 +1032,26 @@
+@@ -1035,6 +1037,26 @@
  			continue;
  		}
  
@@ -92299,7 +86992,7 @@
  		if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
  				|| !(vm_flags & vma->vm_flags))
  			return i ? : -EFAULT;
-@@ -1369,6 +1391,102 @@
+@@ -1403,6 +1425,102 @@
  }
  EXPORT_SYMBOL(remap_pfn_range);
  
@@ -92402,10 +87095,10 @@
  /*
   * handle_pte_fault chooses page fault handler according to an entry
   * which was read non-atomically.  Before making any commitment, on
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/mm/mmap.c linux-2.6.18-xen/mm/mmap.c
---- linux-2.6.18.3/mm/mmap.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/mm/mmap.c	2006-11-19 14:27:16.000000000 +0100
-@@ -1963,6 +1963,10 @@
+diff -x '.hg*' -x '.git*' -urN a/mm/mmap.c b/mm/mmap.c
+--- a/mm/mmap.c	2007-03-16 18:49:45.000000000 +0100
++++ b/mm/mmap.c	2007-03-14 10:55:53.000000000 +0100
+@@ -1977,6 +1977,10 @@
  	unsigned long nr_accounted = 0;
  	unsigned long end;
  
@@ -92416,33 +87109,40 @@
  	lru_add_drain();
  	flush_cache_mm(mm);
  	tlb = tlb_gather_mmu(mm, 1);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/mm/page_alloc.c linux-2.6.18-xen/mm/page_alloc.c
---- linux-2.6.18.3/mm/page_alloc.c	2006-12-06 09:06:09.000000000 +0100
-+++ linux-2.6.18-xen/mm/page_alloc.c	2006-12-05 18:42:37.000000000 +0100
-@@ -443,7 +443,8 @@
+diff -x '.hg*' -x '.git*' -urN a/mm/page_alloc.c b/mm/page_alloc.c
+--- a/mm/page_alloc.c	2007-03-16 18:49:45.000000000 +0100
++++ b/mm/page_alloc.c	2007-03-14 10:55:53.000000000 +0100
+@@ -490,6 +490,8 @@
  	int i;
  	int reserved = 0;
  
--	arch_free_page(page, order);
 +	if (arch_free_page(page, order))
 +		return;
+ 	for (i = 0 ; i < (1 << order) ; ++i)
+ 		reserved += free_pages_check(page + i);
+ 	if (reserved)
+@@ -497,7 +499,6 @@
+ 
  	if (!PageHighMem(page))
- 		debug_check_no_locks_freed(page_address(page),
- 					   PAGE_SIZE<<order);
-@@ -717,7 +718,8 @@
+ 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+-	arch_free_page(page, order);
+ 	kernel_map_pages(page, 1 << order, 0);
+ 
+ 	local_irq_save(flags);
+@@ -784,6 +785,9 @@
  	struct per_cpu_pages *pcp;
  	unsigned long flags;
  
--	arch_free_page(page, 0);
 +	if (arch_free_page(page, 0))
 +		return;
- 
++
  	if (PageAnon(page))
  		page->mapping = NULL;
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/net/core/dev.c linux-2.6.18-xen/net/core/dev.c
---- linux-2.6.18.3/net/core/dev.c	2006-12-06 09:06:10.000000000 +0100
-+++ linux-2.6.18-xen/net/core/dev.c	2006-12-05 18:42:37.000000000 +0100
-@@ -118,6 +118,12 @@
+ 	if (free_pages_check(page))
+diff -x '.hg*' -x '.git*' -urN a/net/core/dev.c b/net/core/dev.c
+--- a/net/core/dev.c	2007-03-16 18:49:45.000000000 +0100
++++ b/net/core/dev.c	2007-03-14 10:55:53.000000000 +0100
+@@ -117,6 +117,12 @@
  #include <linux/err.h>
  #include <linux/ctype.h>
  
@@ -92455,7 +87155,7 @@
  /*
   *	The list of packet types we will receive (as opposed to discard)
   *	and the routines to invoke.
-@@ -1391,6 +1397,43 @@
+@@ -1392,6 +1398,43 @@
  	}						\
  }
  
@@ -92484,7 +87184,7 @@
 +		}
 +		if ((skb->h.raw + skb->csum + 2) > skb->tail)
 +			goto out;
-+		skb->ip_summed = CHECKSUM_HW;
++		skb->ip_summed = CHECKSUM_PARTIAL;
 +		skb->proto_csum_blank = 0;
 +	}
 +	return 0;
@@ -92499,7 +87199,7 @@
  /**
   *	dev_queue_xmit - transmit a buffer
   *	@skb: buffer to transmit
-@@ -1423,6 +1466,12 @@
+@@ -1424,6 +1467,12 @@
  	struct Qdisc *q;
  	int rc = -ENOMEM;
  
@@ -92512,7 +87212,7 @@
  	/* GSO will handle the following emulations directly. */
  	if (netif_needs_gso(dev, skb))
  		goto gso;
-@@ -1798,6 +1847,19 @@
+@@ -1799,6 +1848,19 @@
  	}
  #endif
  
@@ -92521,7 +87221,7 @@
 +	case CHECKSUM_UNNECESSARY:
 +		skb->proto_data_valid = 1;
 +		break;
-+	case CHECKSUM_HW:
++	case CHECKSUM_PARTIAL:
 +		/* XXX Implement me. */
 +	default:
 +		skb->proto_data_valid = 0;
@@ -92532,7 +87232,7 @@
  	list_for_each_entry_rcu(ptype, &ptype_all, list) {
  		if (!ptype->dev || ptype->dev == skb->dev) {
  			if (pt_prev) 
-@@ -3584,6 +3646,7 @@
+@@ -3563,6 +3625,7 @@
  EXPORT_SYMBOL(net_enable_timestamp);
  EXPORT_SYMBOL(net_disable_timestamp);
  EXPORT_SYMBOL(dev_get_flags);
@@ -92540,18 +87240,18 @@
  
  #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
  EXPORT_SYMBOL(br_handle_frame_hook);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/net/core/skbuff.c linux-2.6.18-xen/net/core/skbuff.c
---- linux-2.6.18.3/net/core/skbuff.c	2006-12-06 09:06:10.000000000 +0100
-+++ linux-2.6.18-xen/net/core/skbuff.c	2006-12-05 18:42:37.000000000 +0100
-@@ -139,6 +139,7 @@
+diff -x '.hg*' -x '.git*' -urN a/net/core/skbuff.c b/net/core/skbuff.c
+--- a/net/core/skbuff.c	2007-03-16 18:49:45.000000000 +0100
++++ b/net/core/skbuff.c	2007-03-14 10:55:53.000000000 +0100
+@@ -141,6 +141,7 @@
   *	Buffers may only be allocated from interrupts using a @gfp_mask of
   *	%GFP_ATOMIC.
   */
 +#ifndef CONFIG_HAVE_ARCH_ALLOC_SKB
  struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
- 			    int fclone)
+ 			    int fclone, int node)
  {
-@@ -193,6 +194,7 @@
+@@ -196,6 +197,7 @@
  	skb = NULL;
  	goto out;
  }
@@ -92559,15 +87259,15 @@
  
  /**
   *	alloc_skb_from_cache	-	allocate a network buffer
-@@ -210,14 +212,17 @@
+@@ -213,14 +215,17 @@
   */
- struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+ struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
  				     unsigned int size,
 -				     gfp_t gfp_mask)
 +				     gfp_t gfp_mask,
 +				     int fclone)
  {
-+	kmem_cache_t *cache;
++	struct kmem_cache *cache;
  	struct sk_buff *skb;
  	u8 *data;
  
@@ -92580,7 +87280,7 @@
  	if (!skb)
  		goto out;
  
-@@ -241,10 +246,20 @@
+@@ -244,10 +249,20 @@
  	skb_shinfo(skb)->gso_segs = 0;
  	skb_shinfo(skb)->gso_type = 0;
  	skb_shinfo(skb)->frag_list = NULL;
@@ -92602,7 +87302,7 @@
  	skb = NULL;
  	goto out;
  }
-@@ -464,6 +479,10 @@
+@@ -468,6 +483,10 @@
  	C(local_df);
  	n->cloned = 1;
  	n->nohdr = 0;
@@ -92613,43 +87313,48 @@
  	C(pkt_type);
  	C(ip_summed);
  	C(priority);
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/net/ipv4/netfilter/ip_nat_proto_tcp.c linux-2.6.18-xen/net/ipv4/netfilter/ip_nat_proto_tcp.c
---- linux-2.6.18.3/net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-11-19 14:27:18.000000000 +0100
-@@ -129,7 +129,12 @@
+diff -x '.hg*' -x '.git*' -urN a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
+--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c	2007-03-16 18:49:45.000000000 +0100
++++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c	2007-03-14 10:55:54.000000000 +0100
+@@ -129,8 +129,15 @@
  	if (hdrsize < sizeof(*hdr))
  		return 1;
  
--	hdr->check = ip_nat_cheat_check(~oldip, newip,
 +#ifdef CONFIG_XEN
 +	if ((*pskb)->proto_csum_blank)
-+		hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
++		nf_csum_replace4(&hdr->check, oldip, newip);
 +	else
 +#endif
-+		hdr->check = ip_nat_cheat_check(~oldip, newip,
- 					ip_nat_cheat_check(oldport ^ 0xFFFF,
- 							   newport,
- 							   hdr->check));
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/net/ipv4/netfilter/ip_nat_proto_udp.c linux-2.6.18-xen/net/ipv4/netfilter/ip_nat_proto_udp.c
---- linux-2.6.18.3/net/ipv4/netfilter/ip_nat_proto_udp.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/net/ipv4/netfilter/ip_nat_proto_udp.c	2006-11-19 14:27:18.000000000 +0100
-@@ -114,7 +114,12 @@
- 		portptr = &hdr->dest;
++	{
+ 	nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1);
+ 	nf_proto_csum_replace2(&hdr->check, *pskb, oldport, newport, 0);
++	}
+ 	return 1;
+ }
+ 
+diff -x '.hg*' -x '.git*' -urN a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
+--- a/net/ipv4/netfilter/ip_nat_proto_udp.c	2007-03-16 18:49:45.000000000 +0100
++++ b/net/ipv4/netfilter/ip_nat_proto_udp.c	2007-03-14 10:55:54.000000000 +0100
+@@ -115,8 +115,16 @@
  	}
- 	if (hdr->check) /* 0 is a special case meaning no checksum */
--		hdr->check = ip_nat_cheat_check(~oldip, newip,
+ 
+ 	if (hdr->check || (*pskb)->ip_summed == CHECKSUM_PARTIAL) {
 +#ifdef CONFIG_XEN
 +		if ((*pskb)->proto_csum_blank)
-+			hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
++			nf_csum_replace4(&hdr->check, oldip, newip);
 +		else
 +#endif
-+			hdr->check = ip_nat_cheat_check(~oldip, newip,
- 					ip_nat_cheat_check(*portptr ^ 0xFFFF,
- 							   newport,
- 							   hdr->check));
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/net/ipv4/xfrm4_output.c linux-2.6.18-xen/net/ipv4/xfrm4_output.c
---- linux-2.6.18.3/net/ipv4/xfrm4_output.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/net/ipv4/xfrm4_output.c	2006-11-19 14:27:18.000000000 +0100
++		{
+ 		nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1);
+ 		nf_proto_csum_replace2(&hdr->check, *pskb, *portptr, newport, 0);
++		}
++
+ 		if (!hdr->check)
+ 			hdr->check = CSUM_MANGLED_0;
+ 	}
+diff -x '.hg*' -x '.git*' -urN a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
+--- a/net/ipv4/xfrm4_output.c	2007-03-16 18:49:45.000000000 +0100
++++ b/net/ipv4/xfrm4_output.c	2007-03-14 10:55:54.000000000 +0100
 @@ -18,6 +18,8 @@
  #include <net/xfrm.h>
  #include <net/icmp.h>
@@ -92667,21 +87372,22 @@
 +	if (err)
 +		goto error_nolock;
 +
- 	if (skb->ip_summed == CHECKSUM_HW) {
- 		err = skb_checksum_help(skb, 0);
+ 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ 		err = skb_checksum_help(skb);
  		if (err)
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/net/ipv6/addrconf.c linux-2.6.18-xen/net/ipv6/addrconf.c
---- linux-2.6.18.3/net/ipv6/addrconf.c	2006-09-20 05:42:06.000000000 +0200
-+++ linux-2.6.18-xen/net/ipv6/addrconf.c	2006-11-19 14:27:18.000000000 +0100
-@@ -2514,6 +2514,7 @@
+diff -x '.hg*' -x '.git*' -urN a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+--- a/net/ipv6/addrconf.c	2007-03-16 18:49:45.000000000 +0100
++++ b/net/ipv6/addrconf.c	2007-03-14 10:55:54.000000000 +0100
+@@ -2553,6 +2553,8 @@
  	spin_lock_bh(&ifp->lock);
  
  	if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
++
 +	    !(dev->flags&IFF_MULTICAST) ||
- 	    !(ifp->flags&IFA_F_TENTATIVE)) {
+ 	    !(ifp->flags&IFA_F_TENTATIVE) ||
+ 	    ifp->flags & IFA_F_NODAD) {
  		ifp->flags &= ~IFA_F_TENTATIVE;
- 		spin_unlock_bh(&ifp->lock);
-@@ -2598,6 +2599,7 @@
+@@ -2638,6 +2640,7 @@
  	if (ifp->idev->cnf.forwarding == 0 &&
  	    ifp->idev->cnf.rtr_solicits > 0 &&
  	    (dev->flags&IFF_LOOPBACK) == 0 &&
@@ -92689,9 +87395,9 @@
  	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
  		struct in6_addr all_routers;
  
-diff -urN -x .hg -x .hgtags linux-2.6.18.3/scripts/Makefile.xen linux-2.6.18-xen/scripts/Makefile.xen
---- linux-2.6.18.3/scripts/Makefile.xen	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.18-xen/scripts/Makefile.xen	2006-11-19 14:27:19.000000000 +0100
+diff -x '.hg*' -x '.git*' -urN a/scripts/Makefile.xen b/scripts/Makefile.xen
+--- a/scripts/Makefile.xen	1970-01-01 01:00:00.000000000 +0100
++++ b/scripts/Makefile.xen	2007-03-14 10:55:56.000000000 +0100
 @@ -0,0 +1,14 @@
 +
 +# cherrypickxen($1 = allobj)
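
For reference, the in-event rings added by fbif.h/kbdif.h above are plain
producer/consumer rings indexed through the XENKBD_IN_RING_REF() /
XENFB_IN_RING_REF() macros. A minimal frontend-side consumer sketch for the
keyboard ring follows; it is not part of this patch — `page' is assumed to be
the already-mapped shared struct xenkbd_page, and the per-event handling is
left as placeholder comments.

/* Drain the xenkbd in-event ring defined in
 * include/xen/interface/io/kbdif.h above.  Sketch only, not part of
 * this patch: `page' is the mapped shared page, event handling is
 * stubbed out. */
#include <xen/interface/io/kbdif.h>
#include <asm/system.h>		/* rmb(), mb() */

static void xenkbd_drain_in_ring(struct xenkbd_page *page)
{
	__u32 cons, prod;

	prod = page->in_prod;
	rmb();			/* see ring contents up to in_prod */
	for (cons = page->in_cons; cons != prod; cons++) {
		union xenkbd_in_event *event;

		event = &XENKBD_IN_RING_REF(page, cons);
		switch (event->type) {
		case XENKBD_TYPE_MOTION:
			/* use event->motion.rel_x / rel_y */
			break;
		case XENKBD_TYPE_KEY:
			/* use event->key.keycode / event->key.pressed */
			break;
		case XENKBD_TYPE_POS:
			/* use event->pos.abs_x / abs_y */
			break;
		default:
			break;	/* frontends ignore unknown in events */
		}
	}
	mb();			/* finish reading before acking */
	page->in_cons = cons;
}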

Added: dists/trunk/linux-2.6/debian/patches/features/all/xen/update.patch
==============================================================================
--- (empty file)
+++ dists/trunk/linux-2.6/debian/patches/features/all/xen/update.patch	Fri Mar 16 19:32:46 2007
@@ -0,0 +1,80 @@
+diff -r 563a16543a96 arch/i386/Kconfig
+--- a/arch/i386/Kconfig	Mon Mar 12 14:07:24 2007 +0100
++++ b/arch/i386/Kconfig	Fri Mar 16 20:29:31 2007 +0100
+@@ -200,7 +200,7 @@ config PARAVIRT
+ config PARAVIRT
+ 	bool "Paravirtualization support (EXPERIMENTAL)"
+ 	depends on EXPERIMENTAL
+-	depends on !(X86_VISWS || X86_VOYAGER)
++	depends on !(X86_VISWS || X86_VOYAGER || X86_XEN)
+ 	help
+ 	  Paravirtualization is a way of running multiple instances of
+ 	  Linux on the same machine, under a hypervisor.  This option
+diff -r 563a16543a96 arch/i386/kernel/microcode-xen.c
+--- a/arch/i386/kernel/microcode-xen.c	Mon Mar 12 14:07:24 2007 +0100
++++ b/arch/i386/kernel/microcode-xen.c	Fri Mar 16 20:15:51 2007 +0100
+@@ -52,30 +52,32 @@ MODULE_LICENSE("GPL");
+ /* no concurrent ->write()s are allowed on /dev/cpu/microcode */
+ static DEFINE_MUTEX(microcode_mutex);
+ 
+-static void __user *user_buffer;	/* user area microcode data buffer */
+-static unsigned int user_buffer_size;	/* it's size */
+-				
+ static int microcode_open (struct inode *unused1, struct file *unused2)
+ {
+ 	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+ }
+ 
+ 
+-static int do_microcode_update (void)
++static int do_microcode_update (const void __user *ubuf, size_t len)
+ {
+ 	int err;
+-	dom0_op_t op;
++	void *kbuf;
+ 
+-	err = sys_mlock((unsigned long)user_buffer, user_buffer_size);
+-	if (err != 0)
+-		return err;
++	kbuf = vmalloc(len);
++	if (!kbuf)
++		return -ENOMEM;
+ 
+-	op.cmd = DOM0_MICROCODE;
+-	set_xen_guest_handle(op.u.microcode.data, user_buffer);
+-	op.u.microcode.length = user_buffer_size;
+-	err = HYPERVISOR_dom0_op(&op);
++	if (copy_from_user(kbuf, ubuf, len) == 0) {
++		dom0_op_t op;
+ 
+-	(void)sys_munlock((unsigned long)user_buffer, user_buffer_size);
++		op.cmd = DOM0_MICROCODE;
++		set_xen_guest_handle(op.u.microcode.data, kbuf);
++		op.u.microcode.length = len;
++		err = HYPERVISOR_dom0_op(&op);
++	} else
++		err = -EFAULT;
++
++	vfree(kbuf);
+ 
+ 	return err;
+ }
+@@ -89,17 +91,9 @@ static ssize_t microcode_write (struct f
+ 		return -EINVAL;
+ 	}
+ 
+-	if ((len >> PAGE_SHIFT) > num_physpages) {
+-		printk(KERN_ERR "microcode: too much data (max %ld pages)\n", num_physpages);
+-		return -EINVAL;
+-	}
+-
+ 	mutex_lock(&microcode_mutex);
+ 
+-	user_buffer = (void __user *) buf;
+-	user_buffer_size = (int) len;
+-
+-	ret = do_microcode_update();
++	ret = do_microcode_update(buf, len);
+ 	if (!ret)
+ 		ret = (ssize_t)len;
+ 
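
The rewritten do_microcode_update() above bounces the image through a
vmalloc()ed kernel buffer with copy_from_user() instead of mlocking the
caller's pages, so the userspace interface is unchanged: a single write() of
the microcode image to /dev/cpu/microcode. A rough userspace sketch, not part
of this patch ("microcode.bin" is a placeholder file name, error handling is
minimal):

/* Feed a microcode image to the write() interface served above.
 * Sketch only; "microcode.bin" is a placeholder. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	FILE *in = fopen("microcode.bin", "rb");
	char *buf;
	long len;
	int fd;

	if (!in)
		return 1;
	fseek(in, 0, SEEK_END);
	len = ftell(in);
	rewind(in);
	buf = malloc(len);
	if (!buf || fread(buf, 1, len, in) != (size_t)len)
		return 1;
	fclose(in);

	fd = open("/dev/cpu/microcode", O_WRONLY);
	if (fd < 0)
		return 1;
	/* One write() hands the whole image to do_microcode_update(). */
	if (write(fd, buf, len) != len)
		perror("microcode write");
	close(fd);
	free(buf);
	return 0;
}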

Modified: dists/trunk/linux-2.6/debian/patches/series/1~experimental.1-extra
==============================================================================
--- dists/trunk/linux-2.6/debian/patches/series/1~experimental.1-extra	(original)
+++ dists/trunk/linux-2.6/debian/patches/series/1~experimental.1-extra	Fri Mar 16 19:32:46 2007
@@ -1,6 +1,7 @@
 + features/all/vserver/vs2.2.0-rc15.patch *_vserver *_xen-vserver
 + features/all/vserver/bindmount-dev.patch *_vserver *_xen-vserver
 + features/all/xen/vserver-clash.patch *_xen-vserver
-#+ features/all/xen/fedora-36252.patch *_xen *_xen-vserver
++ features/all/xen/fedora-2.6.20-48670.patch *_xen *_xen-vserver
++ features/all/xen/update.patch *_xen *_xen-vserver
 + features/all/xen/vserver-update.patch *_xen-vserver
 + bugfix/arm/nas100d-artop-temp-fix.patch arm


